-rw-r--r--.ci/premerge_advisor_upload.py7
-rw-r--r--.github/CODEOWNERS1
-rw-r--r--.github/workflows/premerge.yaml2
-rw-r--r--clang-tools-extra/clang-doc/Generators.cpp14
-rw-r--r--clang-tools-extra/clang-tidy/ClangTidyForceLinker.h51
-rw-r--r--clang-tools-extra/clang-tidy/abseil/RedundantStrcatCallsCheck.cpp16
-rw-r--r--clang-tools-extra/clang-tidy/bugprone/MultipleStatementMacroCheck.cpp11
-rw-r--r--clang-tools-extra/clang-tidy/cppcoreguidelines/ProTypeMemberInitCheck.cpp53
-rw-r--r--clang-tools-extra/clang-tidy/misc/CoroutineHostileRAIICheck.cpp4
-rw-r--r--clang-tools-extra/clang-tidy/misc/NoRecursionCheck.cpp6
-rw-r--r--clang-tools-extra/clang-tidy/readability/UppercaseLiteralSuffixCheck.cpp16
-rw-r--r--clang-tools-extra/clang-tidy/utils/DeclRefExprUtils.cpp22
-rw-r--r--clang-tools-extra/clang-tidy/utils/DesignatedInitializers.cpp15
-rw-r--r--clang-tools-extra/clangd/FindTarget.cpp2
-rw-r--r--clang-tools-extra/clangd/unittests/FileDistanceTests.cpp2
-rw-r--r--clang-tools-extra/clangd/unittests/QualityTests.cpp2
-rw-r--r--clang-tools-extra/clangd/unittests/URITests.cpp2
-rw-r--r--clang/docs/ClangFormatStyleOptions.rst83
-rw-r--r--clang/docs/InternalsManual.rst110
-rw-r--r--clang/docs/OpenMPSupport.rst6
-rw-r--r--clang/docs/ReleaseNotes.rst14
-rw-r--r--clang/include/clang/AST/ASTContext.h10
-rw-r--r--clang/include/clang/Basic/Attr.td17
-rw-r--r--clang/include/clang/Basic/AttrDocs.td39
-rw-r--r--clang/include/clang/Basic/Builtins.td12
-rw-r--r--clang/include/clang/Basic/BuiltinsX86.td9
-rw-r--r--clang/include/clang/Basic/DiagnosticSemaKinds.td6
-rw-r--r--clang/include/clang/Basic/arm_mve.td24
-rw-r--r--clang/include/clang/Basic/arm_mve_defs.td7
-rw-r--r--clang/include/clang/Basic/riscv_vector.td83
-rw-r--r--clang/include/clang/Basic/riscv_vector_common.td68
-rw-r--r--clang/include/clang/CIR/Dialect/IR/CIRAttrs.td43
-rw-r--r--clang/include/clang/CIR/Dialect/IR/CIROps.td9
-rw-r--r--clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h14
-rw-r--r--clang/include/clang/CIR/MissingFeatures.h15
-rw-r--r--clang/include/clang/Format/Format.h86
-rw-r--r--clang/include/clang/Sema/Sema.h22
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h17
-rw-r--r--clang/lib/AST/ByteCode/Interp.h2
-rw-r--r--clang/lib/AST/ByteCode/InterpBlock.cpp13
-rw-r--r--clang/lib/AST/ByteCode/InterpBlock.h2
-rw-r--r--clang/lib/AST/ByteCode/InterpBuiltin.cpp18
-rw-r--r--clang/lib/AST/ByteCode/Program.cpp6
-rw-r--r--clang/lib/AST/Comment.cpp8
-rw-r--r--clang/lib/AST/ExprConstant.cpp8
-rw-r--r--clang/lib/AST/Stmt.cpp16
-rw-r--r--clang/lib/AST/StmtPrinter.cpp4
-rw-r--r--clang/lib/AST/TemplateBase.cpp15
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenCXX.cpp3
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp2
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp3
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenModule.cpp87
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenModule.h4
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenValue.h12
-rw-r--r--clang/lib/CIR/Dialect/IR/CIRDialect.cpp41
-rw-r--r--clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp7
-rw-r--r--clang/lib/CodeGen/CGHLSLBuiltins.cpp23
-rw-r--r--clang/lib/CodeGen/CGHLSLRuntime.h1
-rw-r--r--clang/lib/CodeGen/CodeGenTBAA.cpp3
-rw-r--r--clang/lib/CodeGen/Targets/AMDGPU.cpp20
-rw-r--r--clang/lib/Driver/ToolChains/HLSL.cpp38
-rw-r--r--clang/lib/Format/BreakableToken.cpp6
-rw-r--r--clang/lib/Format/ContinuationIndenter.cpp11
-rw-r--r--clang/lib/Format/Format.cpp25
-rw-r--r--clang/lib/Format/FormatToken.cpp13
-rw-r--r--clang/lib/Format/TokenAnnotator.cpp33
-rw-r--r--clang/lib/Format/UnwrappedLineParser.cpp2
-rw-r--r--clang/lib/Format/WhitespaceManager.cpp6
-rw-r--r--clang/lib/Headers/__clang_hip_runtime_wrapper.h2
-rw-r--r--clang/lib/Headers/avx2intrin.h8
-rw-r--r--clang/lib/Headers/avx512bwintrin.h15
-rw-r--r--clang/lib/Headers/avx512vlbwintrin.h8
-rw-r--r--clang/lib/Headers/tmmintrin.h13
-rw-r--r--clang/lib/Parse/ParseTemplate.cpp6
-rw-r--r--clang/lib/Sema/DeclSpec.cpp3
-rw-r--r--clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp80
-rw-r--r--clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h2
-rw-r--r--clang/lib/Sema/HLSLExternalSemaSource.cpp11
-rw-r--r--clang/lib/Sema/SemaConcept.cpp50
-rw-r--r--clang/lib/Sema/SemaDecl.cpp136
-rw-r--r--clang/lib/Sema/SemaDeclAttr.cpp114
-rw-r--r--clang/lib/Sema/SemaExpr.cpp14
-rw-r--r--clang/lib/Sema/SemaHLSL.cpp18
-rw-r--r--clang/lib/Sema/SemaRISCV.cpp3
-rw-r--r--clang/lib/Sema/SemaTemplateInstantiate.cpp114
-rw-r--r--clang/lib/Sema/SemaTemplateInstantiateDecl.cpp40
-rw-r--r--clang/lib/Sema/TreeTransform.h49
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp8
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h2
-rw-r--r--clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Core/CheckerManager.cpp11
-rw-r--r--clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp62
-rw-r--r--clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp7
-rw-r--r--clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp10
-rw-r--r--clang/lib/Support/RISCVVIntrinsicUtils.cpp5
-rw-r--r--clang/lib/Tooling/CompilationDatabase.cpp2
-rw-r--r--clang/lib/Tooling/Execution.cpp4
-rw-r--r--clang/lib/Tooling/Syntax/BuildTree.cpp6
-rw-r--r--clang/test/AST/ByteCode/typeid.cpp7
-rw-r--r--clang/test/AST/HLSL/ByteAddressBuffers-AST.hlsl14
-rw-r--r--clang/test/AST/HLSL/StructuredBuffers-AST.hlsl22
-rw-r--r--clang/test/AST/HLSL/TypedBuffers-AST.hlsl14
-rw-r--r--clang/test/Analysis/analyzer-stats/entry-point-stats.cpp4
-rw-r--r--clang/test/CIR/CodeGen/array.cpp20
-rw-r--r--clang/test/CIR/CodeGen/assign-operator.cpp4
-rw-r--r--clang/test/CIR/CodeGen/binassign.c4
-rw-r--r--clang/test/CIR/CodeGen/bitfields_be.c4
-rw-r--r--clang/test/CIR/CodeGen/builtin_call.cpp2
-rw-r--r--clang/test/CIR/CodeGen/builtin_printf.cpp2
-rw-r--r--clang/test/CIR/CodeGen/call.c24
-rw-r--r--clang/test/CIR/CodeGen/call.cpp16
-rw-r--r--clang/test/CIR/CodeGen/cmp.cpp16
-rw-r--r--clang/test/CIR/CodeGen/comma.c4
-rw-r--r--clang/test/CIR/CodeGen/ctor.cpp2
-rw-r--r--clang/test/CIR/CodeGen/dtors.cpp10
-rw-r--r--clang/test/CIR/CodeGen/inline-attributes.cpp75
-rw-r--r--clang/test/CIR/CodeGen/label.c10
-rw-r--r--clang/test/CIR/CodeGen/lambda-static-invoker.cpp14
-rw-r--r--clang/test/CIR/CodeGen/lambda.cpp26
-rw-r--r--clang/test/CIR/CodeGen/linkage-spec.cpp28
-rw-r--r--clang/test/CIR/CodeGen/loop.cpp20
-rw-r--r--clang/test/CIR/CodeGen/member-functions.cpp4
-rw-r--r--clang/test/CIR/CodeGen/nrvo.cpp4
-rw-r--r--clang/test/CIR/CodeGen/ternary.cpp4
-rw-r--r--clang/test/CIR/CodeGen/throws.cpp48
-rw-r--r--clang/test/CIR/CodeGen/vbase.cpp10
-rw-r--r--clang/test/CIR/CodeGen/vla.c8
-rw-r--r--clang/test/CIR/CodeGen/vtt.cpp4
-rw-r--r--clang/test/CIR/CodeGenOpenACC/cache.c2
-rw-r--r--clang/test/CIR/CodeGenOpenACC/combined-copy.c6
-rw-r--r--clang/test/CIR/CodeGenOpenACC/combined-firstprivate-clause.cpp2
-rw-r--r--clang/test/CIR/CodeGenOpenACC/combined-private-clause.cpp2
-rw-r--r--clang/test/CIR/CodeGenOpenACC/compute-copy.c4
-rw-r--r--clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.c2
-rw-r--r--clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.cpp2
-rw-r--r--clang/test/CIR/CodeGenOpenACC/compute-private-clause.c2
-rw-r--r--clang/test/CIR/CodeGenOpenACC/compute-private-clause.cpp2
-rw-r--r--clang/test/CIR/CodeGenOpenACC/loop-private-clause.cpp2
-rw-r--r--clang/test/CIR/IR/inline-attrs.cir33
-rw-r--r--clang/test/CIR/IR/invalid-try-catch.cir5
-rw-r--r--clang/test/CIR/Lowering/basic.cpp4
-rw-r--r--clang/test/CIR/Lowering/func-simple.cpp10
-rw-r--r--clang/test/CIR/func-simple.cpp14
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfadd.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfclass.c134
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmacc.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmadd.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmax.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmerge.c69
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmin.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsac.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsub.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmul.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmv.c189
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt.c724
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rod.c113
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rtz.c267
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmacc.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmadd.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsac.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsub.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrec7.c129
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsqrt7.c129
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsub.c129
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnj.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjn.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjx.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1down.c129
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1up.c129
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsub.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwadd.c899
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwcvt.c366
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmacc.c486
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmsac.c486
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmul.c455
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmacc.c494
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmsac.c494
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwsub.c899
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfeq.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfge.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfgt.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfle.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmflt.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfne.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfadd.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfclass.c134
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmacc.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmadd.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmax.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmerge.c69
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmin.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsac.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsub.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmul.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmv.c69
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt.c724
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rod.c113
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rtz.c267
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmacc.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmadd.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsac.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsub.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrec7.c129
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsqrt7.c129
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsub.c129
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnj.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjn.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjx.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1down.c129
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1up.c129
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsub.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwadd.c893
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwcvt.c366
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmacc.c474
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmsac.c474
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmul.c451
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmacc.c480
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmsac.c480
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwsub.c893
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfeq.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfge.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfgt.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfle.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmflt.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfne.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfadd.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfclass.c272
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmacc.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmadd.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmax.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmerge.c69
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmin.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsac.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsub.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmul.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmv.c129
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt.c1577
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rod.c233
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rtz.c572
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmacc.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmadd.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsac.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsub.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrec7.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsqrt7.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsub.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnj.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjn.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjx.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1down.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1up.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsub.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwadd.c2007
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwcvt.c765
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmacc.c1017
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmsac.c1017
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmul.c1015
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmacc.c1034
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmsac.c1034
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwsub.c2007
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfeq.c129
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfge.c129
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfgt.c129
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfle.c129
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmflt.c129
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfne.c129
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfadd.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfclass.c272
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmacc.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmadd.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmax.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmerge.c69
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmin.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsac.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsub.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmul.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmv.c129
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt.c1539
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rod.c233
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rtz.c572
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmacc.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmadd.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsac.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsub.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrec7.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsqrt7.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsub.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnj.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjn.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjx.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1down.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1up.c249
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsub.c489
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwadd.c1932
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwcvt.c765
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmacc.c977
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmsac.c977
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmul.c975
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmacc.c994
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmsac.c994
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwsub.c1932
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfeq.c129
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfge.c129
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfgt.c129
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfle.c129
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmflt.c129
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfne.c129
-rw-r--r--clang/test/CodeGen/X86/avx2-builtins.c1
-rw-r--r--clang/test/CodeGen/X86/avx512bw-builtins.c6
-rw-r--r--clang/test/CodeGen/X86/avx512vlbw-builtins.c4
-rw-r--r--clang/test/CodeGen/X86/mmx-builtins.c1
-rw-r--r--clang/test/CodeGen/X86/ssse3-builtins.c1
-rw-r--r--clang/test/CodeGenCUDA/Inputs/cuda.h4
-rw-r--r--clang/test/CodeGenCUDA/cluster_dims.cu38
-rw-r--r--clang/test/CodeGenHLSL/Operators/logical-not.hlsl33
-rw-r--r--clang/test/CodeGenHLSL/resources/ByteAddressBuffers-methods.hlsl45
-rw-r--r--clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-lib.hlsl48
-rw-r--r--clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-ps.hlsl35
-rw-r--r--clang/test/CodeGenHLSL/resources/TypedBuffers-methods.hlsl32
-rw-r--r--clang/test/Driver/linker-wrapper.c4
-rw-r--r--clang/test/Misc/pragma-attribute-supported-attributes-list.test2
-rw-r--r--clang/test/Parser/c2x-auto.c27
-rw-r--r--clang/test/Sema/attr-print.c3
-rw-r--r--clang/test/SemaCUDA/Inputs/cuda.h2
-rw-r--r--clang/test/SemaCUDA/cluster_dims.cu64
-rw-r--r--clang/test/SemaCXX/cxx2c-template-template-param.cpp84
-rw-r--r--clang/test/SemaHLSL/Language/TemplateOutArg.hlsl83
-rw-r--r--clang/test/SemaHLSL/Operators/logical-not.hlsl53
-rw-r--r--clang/test/SemaTemplate/concepts.cpp25
-rw-r--r--clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp4
-rw-r--r--clang/unittests/Format/AlignBracketsTest.cpp13
-rw-r--r--clang/unittests/Format/ConfigParseTest.cpp20
-rw-r--r--clang/unittests/Format/FormatTest.cpp12
-rw-r--r--clang/unittests/Format/FormatTestCSharp.cpp2
-rw-r--r--clang/unittests/Format/FormatTestComments.cpp52
-rw-r--r--clang/unittests/Format/FormatTestJava.cpp2
-rw-r--r--clang/unittests/Format/FormatTestTextProto.cpp2
-rw-r--r--clang/unittests/Format/FormatTestVerilog.cpp2
-rw-r--r--clang/unittests/Format/TokenAnnotatorTest.cpp5
-rw-r--r--clang/unittests/StaticAnalyzer/RangeSetTest.cpp14
-rw-r--r--clang/unittests/StaticAnalyzer/SValTest.cpp7
-rw-r--r--clang/utils/TableGen/MveEmitter.cpp3
-rw-r--r--compiler-rt/test/asan/TestCases/Windows/basic_exception_handling.cpp33
-rw-r--r--flang/include/flang/Optimizer/Dialect/FIRType.h3
-rw-r--r--flang/include/flang/Optimizer/OpenACC/Support/FIROpenACCTypeInterfaces.h14
-rw-r--r--flang/include/flang/Optimizer/OpenMP/Passes.td4
-rw-r--r--flang/lib/Lower/Bridge.cpp2
-rw-r--r--flang/lib/Lower/ConvertExpr.cpp2
-rw-r--r--flang/lib/Optimizer/Builder/Character.cpp2
-rw-r--r--flang/lib/Optimizer/Builder/IntrinsicCall.cpp3
-rw-r--r--flang/lib/Optimizer/Dialect/FIRType.cpp19
-rw-r--r--flang/lib/Optimizer/HLFIR/Transforms/ScheduleOrderedAssignments.cpp62
-rw-r--r--flang/lib/Optimizer/OpenACC/Support/FIROpenACCTypeInterfaces.cpp241
-rw-r--r--flang/lib/Optimizer/OpenMP/CMakeLists.txt1
-rw-r--r--flang/lib/Optimizer/OpenMP/LowerWorkdistribute.cpp1852
-rw-r--r--flang/lib/Optimizer/Passes/Pipelines.cpp4
-rw-r--r--flang/lib/Optimizer/Transforms/AffinePromotion.cpp2
-rw-r--r--flang/lib/Optimizer/Transforms/StackArrays.cpp2
-rw-r--r--flang/test/Fir/OpenACC/pointer-like-interface-alloc.mlir122
-rw-r--r--flang/test/Fir/OpenACC/pointer-like-interface-copy.mlir120
-rw-r--r--flang/test/Fir/OpenACC/pointer-like-interface-free.mlir94
-rw-r--r--flang/test/Fir/OpenACC/recipe-populate-firstprivate.mlir166
-rw-r--r--flang/test/Fir/OpenACC/recipe-populate-private.mlir223
-rw-r--r--flang/test/Fir/basic-program.fir1
-rw-r--r--flang/test/Lower/OpenMP/workdistribute-multiple.f9020
-rw-r--r--flang/test/Lower/OpenMP/workdistribute-saxpy-1d.f9039
-rw-r--r--flang/test/Lower/OpenMP/workdistribute-saxpy-2d.f9045
-rw-r--r--flang/test/Lower/OpenMP/workdistribute-saxpy-3d.f9047
-rw-r--r--flang/test/Lower/OpenMP/workdistribute-saxpy-and-scalar-assign.f9053
-rw-r--r--flang/test/Lower/OpenMP/workdistribute-saxpy-two-2d.f9068
-rw-r--r--flang/test/Lower/OpenMP/workdistribute-scalar-assign.f9029
-rw-r--r--flang/test/Lower/OpenMP/workdistribute-target-teams-clauses.f9032
-rw-r--r--flang/test/Lower/OpenMP/workdistribute-teams-unsupported-after.f9022
-rw-r--r--flang/test/Lower/OpenMP/workdistribute-teams-unsupported-before.f9022
-rw-r--r--flang/test/Lower/polymorphic-temp.f9071
-rw-r--r--flang/test/Transforms/OpenMP/lower-workdistribute-doloop.mlir33
-rw-r--r--flang/test/Transforms/OpenMP/lower-workdistribute-fission-host.mlir117
-rw-r--r--flang/test/Transforms/OpenMP/lower-workdistribute-fission-target.mlir118
-rw-r--r--flang/test/Transforms/OpenMP/lower-workdistribute-fission.mlir71
-rw-r--r--flang/test/Transforms/OpenMP/lower-workdistribute-runtime-assign-scalar.mlir108
-rw-r--r--flang/tools/fir-opt/CMakeLists.txt1
-rw-r--r--flang/tools/fir-opt/fir-opt.cpp6
-rw-r--r--libc/include/llvm-libc-macros/netinet-in-macros.h8
-rw-r--r--libc/shared/math.h1
-rw-r--r--libc/shared/math/exp2m1f.h23
-rw-r--r--libc/src/__support/math/CMakeLists.txt18
-rw-r--r--libc/src/__support/math/exp2m1f.h195
-rw-r--r--libc/src/math/generic/CMakeLists.txt12
-rw-r--r--libc/src/math/generic/exp2m1f.cpp177
-rw-r--r--libc/test/include/netinet_in_test.cpp10
-rw-r--r--libc/test/shared/CMakeLists.txt1
-rw-r--r--libc/test/shared/shared_math_test.cpp1
-rw-r--r--libc/test/src/arpa/inet/CMakeLists.txt8
-rw-r--r--libcxx/include/__configuration/abi.h12
-rw-r--r--libcxx/include/__cxx03/vector8
-rw-r--r--libcxx/include/__memory/array_cookie.h84
-rw-r--r--libcxx/include/__tree26
-rw-r--r--libcxx/test/std/containers/sequences/vector.bool/shrink_to_fit.pass.cpp2
-rw-r--r--libcxx/test/std/utilities/smartptr/unique.ptr/unique.ptr.class/unique.ptr.observers/assert.subscript.pass.cpp48
-rwxr-xr-xlibcxx/utils/compare-benchmarks15
-rw-r--r--lld/MachO/Driver.cpp11
-rw-r--r--lld/test/CMakeLists.txt1
-rw-r--r--lld/test/MachO/read-workers-no-thread-support.s10
-rw-r--r--lld/test/MachO/read-workers.s5
-rw-r--r--lld/test/lit.cfg.py3
-rw-r--r--lld/test/lit.site.cfg.py.in1
-rw-r--r--lldb/include/lldb/Utility/DataExtractor.h2
-rw-r--r--lldb/source/Plugins/LanguageRuntime/ObjC/ObjCLanguageRuntime.cpp65
-rw-r--r--lldb/source/Plugins/LanguageRuntime/ObjC/ObjCLanguageRuntime.h4
-rw-r--r--lldb/test/API/lang/objc/ivar-in-framework-base/Makefile6
-rw-r--r--lldb/test/API/lang/objc/ivar-in-framework-base/TestIvarInFrameworkBase.py39
-rw-r--r--lldb/test/API/lang/objc/ivar-in-framework-base/lib.h6
-rw-r--r--lldb/test/API/lang/objc/ivar-in-framework-base/lib.m8
-rw-r--r--lldb/test/API/lang/objc/ivar-in-framework-base/main.m22
-rw-r--r--llvm/docs/AMDGPUUsage.rst2
-rw-r--r--llvm/docs/CodeOfConduct.rst1
-rw-r--r--llvm/docs/CommandGuide/dsymutil.rst8
-rw-r--r--llvm/docs/LangRef.rst25
-rw-r--r--llvm/docs/SPIRVUsage.rst2
-rw-r--r--llvm/docs/TableGen/BackEnds.rst50
-rw-r--r--llvm/include/llvm-c/DebugInfo.h24
-rw-r--r--llvm/include/llvm/ADT/APFloat.h152
-rw-r--r--llvm/include/llvm/ADT/DenseMap.h2
-rw-r--r--llvm/include/llvm/ADT/DepthFirstIterator.h18
-rw-r--r--llvm/include/llvm/ADT/ImmutableSet.h6
-rw-r--r--llvm/include/llvm/ADT/PostOrderIterator.h6
-rw-r--r--llvm/include/llvm/ADT/STLExtras.h2
-rw-r--r--llvm/include/llvm/ADT/STLForwardCompat.h48
-rw-r--r--llvm/include/llvm/ADT/SmallPtrSet.h12
-rw-r--r--llvm/include/llvm/ADT/bit.h42
-rw-r--r--llvm/include/llvm/Analysis/LoopAnalysisManager.h2
-rw-r--r--llvm/include/llvm/Analysis/ScalarEvolution.h1
-rw-r--r--llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h12
-rw-r--r--llvm/include/llvm/IR/DebugProgramInstruction.h10
-rw-r--r--llvm/include/llvm/IR/Value.h4
-rw-r--r--llvm/include/llvm/Support/Alignment.h2
-rw-r--r--llvm/include/llvm/Support/Casting.h7
-rw-r--r--llvm/include/llvm/Support/CommandLine.h2
-rw-r--r--llvm/include/llvm/Support/DOTGraphTraits.h5
-rw-r--r--llvm/include/llvm/Support/ELFAttributes.h2
-rw-r--r--llvm/include/llvm/Support/LSP/Protocol.h2
-rw-r--r--llvm/include/llvm/Support/MD5.h2
-rw-r--r--llvm/include/llvm/Support/MathExtras.h53
-rw-r--r--llvm/include/llvm/Support/Timer.h2
-rw-r--r--llvm/include/llvm/Transforms/Scalar/LoopPassManager.h15
-rwxr-xr-xllvm/lib/Analysis/ConstantFolding.cpp56
-rw-r--r--llvm/lib/Analysis/LazyValueInfo.cpp10
-rw-r--r--llvm/lib/Analysis/ScalarEvolution.cpp166
-rw-r--r--llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp55
-rw-r--r--llvm/lib/IR/AutoUpgrade.cpp59
-rw-r--r--llvm/lib/IR/DebugInfo.cpp43
-rw-r--r--llvm/lib/IR/Verifier.cpp5
-rw-r--r--llvm/lib/Passes/PassBuilder.cpp33
-rw-r--r--llvm/lib/Passes/PassBuilderPipelines.cpp34
-rw-r--r--llvm/lib/Remarks/BitstreamRemarkParser.h4
-rw-r--r--llvm/lib/Support/APFloat.cpp538
-rw-r--r--llvm/lib/Support/SourceMgr.cpp10
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp4
-rw-r--r--llvm/lib/Target/AMDGPU/DSInstructions.td6
-rw-r--r--llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp1
-rw-r--r--llvm/lib/Target/AMDGPU/FLATInstructions.td6
-rw-r--r--llvm/lib/Target/AMDGPU/GCNRegPressure.h9
-rw-r--r--llvm/lib/Target/AMDGPU/SIInstrInfo.cpp61
-rw-r--r--llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp47
-rw-r--r--llvm/lib/Target/ARM/ARMISelLowering.cpp29
-rw-r--r--llvm/lib/Target/RISCV/RISCVFeatures.td3
-rw-r--r--llvm/lib/Target/RISCV/RISCVISelLowering.cpp3
-rw-r--r--llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp3
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfo.cpp22
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td28
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td726
-rw-r--r--llvm/lib/Target/RISCV/RISCVSubtarget.h5
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp20
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp4
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp10
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp32
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td4
-rw-r--r--llvm/lib/Target/X86/X86ISelLowering.cpp74
-rw-r--r--llvm/lib/Target/X86/X86ISelLoweringCall.cpp20
-rw-r--r--llvm/lib/Target/X86/X86InstrInfo.cpp22
-rw-r--r--llvm/lib/Target/X86/X86MCInstLower.cpp31
-rw-r--r--llvm/lib/Transforms/CFGuard/CFGuard.cpp25
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp4
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp42
-rw-r--r--llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp20
-rw-r--r--llvm/lib/Transforms/Scalar/LoopPassManager.cpp5
-rw-r--r--llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp59
-rw-r--r--llvm/lib/Transforms/Scalar/Reg2Mem.cpp6
-rw-r--r--llvm/lib/Transforms/Scalar/SROA.cpp34
-rw-r--r--llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp40
-rw-r--r--llvm/lib/Transforms/Utils/SCCPSolver.cpp96
-rw-r--r--llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp111
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlan.h12
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp2
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp8
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp9
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanUtils.h2
-rw-r--r--llvm/test/Bindings/llvm-c/debug_info_new_format.ll107
-rw-r--r--llvm/test/CodeGen/AMDGPU/abs_i16.ll980
-rw-r--r--llvm/test/CodeGen/AMDGPU/add.v2i16.ll10
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll22342
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll2356
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll5894
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll1242
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.32bit.ll768
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll362
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll7815
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll2484
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll4594
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll1340
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll4962
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll5336
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll5688
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll6014
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll6338
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll1411
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fold-binop-select.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll140
-rw-r--r--llvm/test/CodeGen/AMDGPU/build-vector-packed-partial-undef.ll28
-rw-r--r--llvm/test/CodeGen/AMDGPU/bypass-div.ll8
-rw-r--r--llvm/test/CodeGen/AMDGPU/divergence-driven-buildvector.ll5
-rw-r--r--llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-f16-true16.mir87
-rw-r--r--llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll335
-rw-r--r--llvm/test/CodeGen/AMDGPU/fneg.bf16.ll123
-rw-r--r--llvm/test/CodeGen/AMDGPU/fptosi.f16.ll7
-rw-r--r--llvm/test/CodeGen/AMDGPU/fptoui.f16.ll7
-rw-r--r--llvm/test/CodeGen/AMDGPU/frem.ll65
-rw-r--r--llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll8
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll14
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll14
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll12
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll12
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.maximum.f16.ll16
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.minimum.f16.ll16
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.sqrt.bf16.ll10
-rw-r--r--llvm/test/CodeGen/AMDGPU/load-constant-i8.ll12
-rw-r--r--llvm/test/CodeGen/AMDGPU/sdiv.ll788
-rw-r--r--llvm/test/CodeGen/AMDGPU/select.f16.ll72
-rw-r--r--llvm/test/CodeGen/AMDGPU/srem.ll26
-rw-r--r--llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll6
-rw-r--r--llvm/test/CodeGen/AMDGPU/sub.v2i16.ll10
-rw-r--r--llvm/test/CodeGen/AMDGPU/v_sat_pk_u8_i16.ll22
-rw-r--r--llvm/test/CodeGen/AMDGPU/vector-reduce-and.ll14
-rw-r--r--llvm/test/CodeGen/AMDGPU/vector-reduce-mul.ll80
-rw-r--r--llvm/test/CodeGen/AMDGPU/vector-reduce-or.ll18
-rw-r--r--llvm/test/CodeGen/AMDGPU/vector-reduce-xor.ll18
-rw-r--r--llvm/test/CodeGen/AMDGPU/vector_rebroadcast.ll29
-rw-r--r--llvm/test/CodeGen/AMDGPU/vector_shuffle.packed.ll20
-rw-r--r--llvm/test/CodeGen/RISCV/div_minsize.ll148
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll186
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll607
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll294
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll553
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll553
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll571
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll258
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll571
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll553
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll553
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll607
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmv-bf-s.ll88
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmv-s-bf.ll161
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll216
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll226
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll270
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll270
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll288
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll288
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll553
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll553
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll553
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll553
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll282
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll264
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll282
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll571
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll571
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll571
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfslide1down-bf.ll288
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll294
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll559
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll519
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll773
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll264
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll264
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfwmsac-bf.ll506
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll519
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfwnmacc-bf.ll506
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfwnmsac-bf.ll506
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll519
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll773
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmfeq-bf.ll496
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmfge-bf.ll496
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmfgt-bf.ll496
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmfle-bf.ll496
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmflt-bf.ll496
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmfne-bf.ll496
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_maximal_reconvergence/enable-maximal-reconvergence.ll21
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-resources/DynamicIdx/RWBufferDynamicIdx.ll22
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-resources/DynamicIdx/RWStructuredBufferDynamicIdx.ll21
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-resources/NonUniformIdx/RWBufferNonUniformIdx.ll (renamed from llvm/test/CodeGen/SPIRV/hlsl-resources/NonUniformIdx/StructuredBufferNonUniformIdx.ll)0
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-resources/NonUniformIdx/RWStructuredBufferNonUniformIdx.ll1
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-resources/StorageImageConstIdx.ll (renamed from llvm/test/CodeGen/SPIRV/hlsl-resources/StorageImageDynIdx.ll)2
-rw-r--r--llvm/test/CodeGen/X86/avx-shift.ll2
-rw-r--r--llvm/test/CodeGen/X86/avx2-arith.ll4
-rw-r--r--llvm/test/CodeGen/X86/combine-mul.ll22
-rw-r--r--llvm/test/CodeGen/X86/combine-multiplies.ll4
-rw-r--r--llvm/test/CodeGen/X86/combine-pmuldq.ll24
-rw-r--r--llvm/test/CodeGen/X86/combine-rotates.ll4
-rw-r--r--llvm/test/CodeGen/X86/combine-sdiv.ll8
-rw-r--r--llvm/test/CodeGen/X86/combine-shl.ll54
-rw-r--r--llvm/test/CodeGen/X86/combine-srem.ll10
-rw-r--r--llvm/test/CodeGen/X86/combine-udiv.ll10
-rw-r--r--llvm/test/CodeGen/X86/combine-umax.ll2
-rw-r--r--llvm/test/CodeGen/X86/combine-umin.ll2
-rw-r--r--llvm/test/CodeGen/X86/combine-urem.ll4
-rw-r--r--llvm/test/CodeGen/X86/dagcombine-shifts.ll4
-rw-r--r--llvm/test/CodeGen/X86/funnel-shift.ll8
-rw-r--r--llvm/test/CodeGen/X86/hoist-and-by-const-from-shl-in-eqcmp-zero.ll8
-rw-r--r--llvm/test/CodeGen/X86/known-pow2.ll6
-rw-r--r--llvm/test/CodeGen/X86/madd.ll8
-rw-r--r--llvm/test/CodeGen/X86/omit-urem-of-power-of-two-or-zero-when-comparing-with-zero.ll16
-rw-r--r--llvm/test/CodeGen/X86/pmul.ll2
-rw-r--r--llvm/test/CodeGen/X86/pr162812.ll50
-rw-r--r--llvm/test/CodeGen/X86/prefer-avx256-wide-mul.ll2
-rw-r--r--llvm/test/CodeGen/X86/rotate-extract-vector.ll38
-rw-r--r--llvm/test/CodeGen/X86/sdiv-exact.ll18
-rw-r--r--llvm/test/CodeGen/X86/shrink_vmul.ll20
-rw-r--r--llvm/test/CodeGen/X86/slow-pmulld.ll8
-rw-r--r--llvm/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll388
-rw-r--r--llvm/test/CodeGen/X86/srem-seteq-vec-splat.ll24
-rw-r--r--llvm/test/CodeGen/X86/udiv-exact.ll18
-rw-r--r--llvm/test/CodeGen/X86/undo-mul-and.ll18
-rw-r--r--llvm/test/CodeGen/X86/urem-seteq-illegal-types.ll8
-rw-r--r--llvm/test/CodeGen/X86/urem-seteq-vec-nonsplat.ll470
-rw-r--r--llvm/test/CodeGen/X86/urem-seteq-vec-nonzero.ll20
-rw-r--r--llvm/test/CodeGen/X86/urem-seteq-vec-splat.ll36
-rw-r--r--llvm/test/CodeGen/X86/urem-seteq-vec-tautological.ll20
-rw-r--r--llvm/test/CodeGen/X86/var-permute-128.ll8
-rw-r--r--llvm/test/CodeGen/X86/vec_reassociate.ll8
-rw-r--r--llvm/test/CodeGen/X86/vector-compress.ll70
-rw-r--r--llvm/test/CodeGen/X86/vector-fshl-128.ll12
-rw-r--r--llvm/test/CodeGen/X86/vector-fshl-256.ll4
-rw-r--r--llvm/test/CodeGen/X86/vector-fshl-rot-128.ll16
-rw-r--r--llvm/test/CodeGen/X86/vector-fshl-rot-256.ll8
-rw-r--r--llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll16
-rw-r--r--llvm/test/CodeGen/X86/vector-fshl-sub128.ll12
-rw-r--r--llvm/test/CodeGen/X86/vector-fshr-128.ll12
-rw-r--r--llvm/test/CodeGen/X86/vector-fshr-256.ll4
-rw-r--r--llvm/test/CodeGen/X86/vector-fshr-rot-128.ll16
-rw-r--r--llvm/test/CodeGen/X86/vector-fshr-rot-256.ll8
-rw-r--r--llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll16
-rw-r--r--llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll36
-rw-r--r--llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll32
-rw-r--r--llvm/test/CodeGen/X86/vector-idiv-sdiv-512.ll32
-rw-r--r--llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll56
-rw-r--r--llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll56
-rw-r--r--llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll56
-rw-r--r--llvm/test/CodeGen/X86/vector-mul.ll100
-rw-r--r--llvm/test/CodeGen/X86/vector-rotate-128.ll16
-rw-r--r--llvm/test/CodeGen/X86/vector-rotate-256.ll8
-rw-r--r--llvm/test/CodeGen/X86/vector-shift-shl-128.ll12
-rw-r--r--llvm/test/CodeGen/X86/vector-shift-shl-256.ll8
-rw-r--r--llvm/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll8
-rw-r--r--llvm/test/CodeGen/X86/vector-trunc-math.ll92
-rw-r--r--llvm/test/CodeGen/X86/vselect-avx.ll18
-rw-r--r--llvm/test/CodeGen/X86/vselect-pcmp.ll6
-rw-r--r--llvm/test/CodeGen/X86/zero-call-used-regs-simd.ll216
-rw-r--r--llvm/test/DebugInfo/Generic/compileunit-source-language-name.ll10
-rw-r--r--llvm/test/DebugInfo/X86/shrink-wrap-frame-setup-no-loc.mir99
-rw-r--r--llvm/test/Instrumentation/AddressSanitizer/asan-win-dont-instrument-catchpad.ll63
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_vds_alias.s12
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_vflat_alias.s75
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx8_vop3cx_nowarn.txt422
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx9_vop3c_nowarn.txt402
-rw-r--r--llvm/test/Other/loop-pm-invalidation.ll30
-rw-r--r--llvm/test/Other/new-pm-defaults.ll1
-rw-r--r--llvm/test/Other/new-pm-thinlto-postlink-defaults.ll1
-rw-r--r--llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll1
-rw-r--r--llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll1
-rw-r--r--llvm/test/Other/new-pm-thinlto-prelink-defaults.ll1
-rw-r--r--llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll1
-rw-r--r--llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll1
-rw-r--r--llvm/test/Transforms/IndVarSimplify/pointer-loop-guards.ll173
-rw-r--r--llvm/test/Transforms/InstCombine/icmp-trunc.ll30
-rw-r--r--llvm/test/Transforms/InstCombine/scmp.ll261
-rw-r--r--llvm/test/Transforms/InstSimplify/ConstProp/WebAssembly/any_all_true.ll1
-rw-r--r--llvm/test/Transforms/InstSimplify/ConstProp/bitcount.ll17
-rw-r--r--llvm/test/Transforms/InstSimplify/ConstProp/bitreverse.ll51
-rw-r--r--llvm/test/Transforms/InstSimplify/ConstProp/bswap.ll17
-rw-r--r--llvm/test/Transforms/InstSimplify/ConstProp/vecreduce.ll1
-rw-r--r--llvm/test/Transforms/LoopPredication/preserve-bpi.ll60
-rw-r--r--llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/pr48832.ll2
-rw-r--r--llvm/test/Transforms/LowerMatrixIntrinsics/data-layout-multiply-fused.ll174
-rw-r--r--llvm/test/Transforms/LowerMatrixIntrinsics/data-layout.ll162
-rw-r--r--llvm/test/Transforms/LowerMatrixIntrinsics/multiply-remainder-rm.ll96
-rw-r--r--llvm/test/Transforms/LowerMatrixIntrinsics/multiply-remainder.ll96
-rw-r--r--llvm/test/Transforms/PhaseOrdering/unswitch-cold-func.ll (renamed from llvm/test/Transforms/SimpleLoopUnswitch/PGO-nontrivial-unswitch.ll)9
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/last-non-copyable-inst-used-outside-bb.ll89
-rw-r--r--llvm/test/Transforms/SimpleLoopUnswitch/nontrivial-unswitch-markloopasdeleted.ll1
-rw-r--r--llvm/test/Verifier/matrix-intrinsics.ll23
-rw-r--r--llvm/test/tools/llvm-reduce/reduce-instructions-alloca.ll16
-rw-r--r--llvm/tools/llvm-c-test/debuginfo.c5
-rw-r--r--llvm/tools/llvm-reduce/deltas/ReduceInstructions.cpp6
-rw-r--r--llvm/unittests/ADT/BitTest.cpp16
-rw-r--r--llvm/unittests/Bitcode/DataLayoutUpgradeTest.cpp51
-rw-r--r--llvm/unittests/ExecutionEngine/Orc/ReOptimizeLayerTest.cpp2
-rw-r--r--llvm/utils/gn/secondary/lld/test/BUILD.gn7
-rw-r--r--llvm/utils/profcheck-xfail.txt1
-rw-r--r--[-rwxr-xr-x]llvm/utils/release/build_llvm_release.bat150
-rw-r--r--mlir/Maintainers.md2
-rw-r--r--mlir/docs/Canonicalization.md2
-rw-r--r--mlir/docs/Dialects/Shard.md6
-rw-r--r--mlir/include/mlir-c/Rewrite.h2
-rw-r--r--mlir/include/mlir/Conversion/MathToROCDL/MathToROCDL.h8
-rw-r--r--mlir/include/mlir/Conversion/Passes.td9
-rw-r--r--mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td93
-rw-r--r--mlir/include/mlir/Dialect/Affine/IR/AffineOps.td1
-rw-r--r--mlir/include/mlir/Dialect/Arith/IR/ArithOps.td94
-rw-r--r--mlir/include/mlir/Dialect/GPU/Pipelines/Passes.h57
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td61
-rw-r--r--mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h37
-rw-r--r--mlir/include/mlir/Dialect/SMT/IR/SMTOps.td2
-rw-r--r--mlir/include/mlir/Dialect/Shard/IR/ShardOps.td117
-rw-r--r--mlir/include/mlir/Dialect/Transform/SMTExtension/SMTExtensionOps.h1
-rw-r--r--mlir/include/mlir/Dialect/Transform/SMTExtension/SMTExtensionOps.td17
-rw-r--r--mlir/include/mlir/Dialect/WasmSSA/IR/WasmSSAOps.td175
-rw-r--r--mlir/include/mlir/IR/CommonTypeConstraints.td8
-rw-r--r--mlir/include/mlir/Target/Wasm/WasmBinaryEncoding.h71
-rw-r--r--mlir/lib/Bindings/Python/Rewrite.cpp2
-rw-r--r--mlir/lib/CAPI/Transforms/Rewrite.cpp2
-rw-r--r--mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp2
-rw-r--r--mlir/lib/Conversion/MathToROCDL/CMakeLists.txt1
-rw-r--r--mlir/lib/Conversion/MathToROCDL/MathToROCDL.cpp76
-rw-r--r--mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp19
-rw-r--r--mlir/lib/Dialect/Affine/IR/AffineOps.cpp171
-rw-r--r--mlir/lib/Dialect/Bufferization/Transforms/DropEquivalentBufferResults.cpp70
-rw-r--r--mlir/lib/Dialect/GPU/Pipelines/CMakeLists.txt6
-rw-r--r--mlir/lib/Dialect/GPU/Pipelines/GPUToXeVMPipeline.cpp139
-rw-r--r--mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp16
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp140
-rw-r--r--mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp36
-rw-r--r--mlir/lib/Dialect/MemRef/Transforms/EmulateWideInt.cpp2
-rw-r--r--mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp152
-rw-r--r--mlir/lib/Dialect/Transform/SMTExtension/SMTExtensionOps.cpp83
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp26
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp48
-rw-r--r--mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp2
-rw-r--r--mlir/lib/Dialect/WasmSSA/IR/WasmSSAOps.cpp141
-rw-r--r--mlir/lib/RegisterAllPasses.cpp1
-rw-r--r--mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp2
-rw-r--r--mlir/lib/Target/SPIRV/Deserialization/Deserializer.cpp5
-rw-r--r--mlir/lib/Target/Wasm/TranslateFromWasm.cpp459
-rw-r--r--mlir/python/mlir/dialects/transform/smt.py12
-rw-r--r--mlir/test/Conversion/MathToROCDL/math-to-rocdl.mlir76
-rw-r--r--mlir/test/Conversion/MathToXeVM/math-to-xevm.mlir44
-rw-r--r--mlir/test/Dialect/AMDGPU/invalid.mlir24
-rw-r--r--mlir/test/Dialect/AMDGPU/ops.mlir55
-rw-r--r--mlir/test/Dialect/LLVMIR/rocdl.mlir51
-rw-r--r--mlir/test/Dialect/MemRef/canonicalize.mlir30
-rw-r--r--mlir/test/Dialect/Tensor/one-shot-bufferize.mlir29
-rw-r--r--mlir/test/Dialect/Transform/test-smt-extension-invalid.mlir116
-rw-r--r--mlir/test/Dialect/Transform/test-smt-extension.mlir21
-rw-r--r--mlir/test/Dialect/Vector/linearize.mlir41
-rw-r--r--mlir/test/Dialect/Vector/vector-warp-distribute.mlir35
-rw-r--r--mlir/test/Dialect/WasmSSA/custom_parser/global.mlir4
-rw-r--r--mlir/test/Dialect/WasmSSA/custom_parser/if.mlir8
-rw-r--r--mlir/test/Dialect/WasmSSA/custom_parser/import.mlir8
-rw-r--r--mlir/test/Dialect/WasmSSA/custom_parser/local.mlir12
-rw-r--r--mlir/test/Dialect/WasmSSA/custom_parser/memory.mlir8
-rw-r--r--mlir/test/Dialect/WasmSSA/custom_parser/table.mlir6
-rw-r--r--mlir/test/Dialect/WasmSSA/extend-invalid.mlir4
-rw-r--r--mlir/test/Dialect/WasmSSA/global-invalid.mlir12
-rw-r--r--mlir/test/Dialect/WasmSSA/locals-invalid.mlir4
-rw-r--r--mlir/test/Integration/Dialect/XeGPU/LANE/lit.local.cfg4
-rw-r--r--mlir/test/Integration/Dialect/XeGPU/LANE/simple_gemm.mlir121
-rw-r--r--mlir/test/Integration/Dialect/XeGPU/SG/lit.local.cfg4
-rw-r--r--mlir/test/Integration/Dialect/XeGPU/SG/simple_gemm.mlir120
-rw-r--r--mlir/test/Integration/Dialect/XeGPU/WG/lit.local.cfg4
-rw-r--r--mlir/test/Integration/Dialect/XeGPU/WG/simple_gemm.mlir151
-rw-r--r--mlir/test/Target/LLVMIR/rocdl.mlir129
-rw-r--r--mlir/test/Target/Wasm/abs.mlir4
-rw-r--r--mlir/test/Target/Wasm/add_div.mlir40
-rw-r--r--mlir/test/Target/Wasm/and.mlir4
-rw-r--r--mlir/test/Target/Wasm/block.mlir16
-rw-r--r--mlir/test/Target/Wasm/block_complete_type.mlir24
-rw-r--r--mlir/test/Target/Wasm/block_value_type.mlir19
-rw-r--r--mlir/test/Target/Wasm/branch_if.mlir29
-rw-r--r--mlir/test/Target/Wasm/call.mlir17
-rw-r--r--mlir/test/Target/Wasm/clz.mlir4
-rw-r--r--mlir/test/Target/Wasm/comparison_ops.mlir269
-rw-r--r--mlir/test/Target/Wasm/const.mlir8
-rw-r--r--mlir/test/Target/Wasm/convert.mlir85
-rw-r--r--mlir/test/Target/Wasm/copysign.mlir4
-rw-r--r--mlir/test/Target/Wasm/ctz.mlir4
-rw-r--r--mlir/test/Target/Wasm/demote.mlir15
-rw-r--r--mlir/test/Target/Wasm/div.mlir20
-rw-r--r--mlir/test/Target/Wasm/double_nested_loop.mlir63
-rw-r--r--mlir/test/Target/Wasm/empty_blocks_list_and_stack.mlir53
-rw-r--r--mlir/test/Target/Wasm/eq.mlir56
-rw-r--r--mlir/test/Target/Wasm/eqz.mlir21
-rw-r--r--mlir/test/Target/Wasm/extend.mlir69
-rw-r--r--mlir/test/Target/Wasm/global.mlir16
-rw-r--r--mlir/test/Target/Wasm/if.mlir112
-rw-r--r--mlir/test/Target/Wasm/import.mlir12
-rw-r--r--mlir/test/Target/Wasm/inputs/add_div.yaml.wasm50
-rw-r--r--mlir/test/Target/Wasm/inputs/block.yaml.wasm22
-rw-r--r--mlir/test/Target/Wasm/inputs/block_complete_type.yaml.wasm23
-rw-r--r--mlir/test/Target/Wasm/inputs/block_value_type.yaml.wasm18
-rw-r--r--mlir/test/Target/Wasm/inputs/branch_if.yaml.wasm18
-rw-r--r--mlir/test/Target/Wasm/inputs/call.yaml.wasm26
-rw-r--r--mlir/test/Target/Wasm/inputs/comparison_ops.yaml.wasm88
-rw-r--r--mlir/test/Target/Wasm/inputs/convert.yaml.wasm69
-rw-r--r--mlir/test/Target/Wasm/inputs/demote.yaml.wasm18
-rw-r--r--mlir/test/Target/Wasm/inputs/double_nested_loop.yaml.wasm19
-rw-r--r--mlir/test/Target/Wasm/inputs/empty_blocks_list_and_stack.yaml.wasm21
-rw-r--r--mlir/test/Target/Wasm/inputs/eq.yaml.wasm27
-rw-r--r--mlir/test/Target/Wasm/inputs/eqz.yaml.wasm29
-rw-r--r--mlir/test/Target/Wasm/inputs/extend.yaml.wasm40
-rw-r--r--mlir/test/Target/Wasm/inputs/if.yaml.wasm25
-rw-r--r--mlir/test/Target/Wasm/inputs/loop.yaml.wasm17
-rw-r--r--mlir/test/Target/Wasm/inputs/loop_with_inst.yaml.wasm20
-rw-r--r--mlir/test/Target/Wasm/inputs/ne.yaml.wasm27
-rw-r--r--mlir/test/Target/Wasm/inputs/promote.yaml.wasm18
-rw-r--r--mlir/test/Target/Wasm/inputs/reinterpret.yaml.wasm53
-rw-r--r--mlir/test/Target/Wasm/inputs/rounding.yaml.wasm37
-rw-r--r--mlir/test/Target/Wasm/inputs/wrap.yaml.wasm24
-rw-r--r--mlir/test/Target/Wasm/invalid_block_type_index.yaml28
-rw-r--r--mlir/test/Target/Wasm/local.mlir6
-rw-r--r--mlir/test/Target/Wasm/loop.mlir17
-rw-r--r--mlir/test/Target/Wasm/loop_with_inst.mlir33
-rw-r--r--mlir/test/Target/Wasm/max.mlir4
-rw-r--r--mlir/test/Target/Wasm/memory_min_eq_max.mlir2
-rw-r--r--mlir/test/Target/Wasm/memory_min_max.mlir2
-rw-r--r--mlir/test/Target/Wasm/memory_min_no_max.mlir2
-rw-r--r--mlir/test/Target/Wasm/min.mlir4
-rw-r--r--mlir/test/Target/Wasm/ne.mlir52
-rw-r--r--mlir/test/Target/Wasm/neg.mlir4
-rw-r--r--mlir/test/Target/Wasm/or.mlir4
-rw-r--r--mlir/test/Target/Wasm/popcnt.mlir4
-rw-r--r--mlir/test/Target/Wasm/promote.mlir14
-rw-r--r--mlir/test/Target/Wasm/reinterpret.mlir46
-rw-r--r--mlir/test/Target/Wasm/rem.mlir8
-rw-r--r--mlir/test/Target/Wasm/rotl.mlir4
-rw-r--r--mlir/test/Target/Wasm/rotr.mlir4
-rw-r--r--mlir/test/Target/Wasm/rounding.mlir50
-rw-r--r--mlir/test/Target/Wasm/shl.mlir4
-rw-r--r--mlir/test/Target/Wasm/shr_s.mlir4
-rw-r--r--mlir/test/Target/Wasm/shr_u.mlir4
-rw-r--r--mlir/test/Target/Wasm/sqrt.mlir4
-rw-r--r--mlir/test/Target/Wasm/sub.mlir8
-rw-r--r--mlir/test/Target/Wasm/wrap.mlir15
-rw-r--r--mlir/test/Target/Wasm/xor.mlir4
-rw-r--r--mlir/test/python/dialects/transform_smt_ext.py30
-rw-r--r--mlir/unittests/Dialect/SparseTensor/MergerTest.cpp2
-rw-r--r--offload/include/OpenMP/InteropAPI.h12
-rw-r--r--offload/libomptarget/OpenMP/InteropAPI.cpp3
-rw-r--r--polly/lib/Transform/Canonicalization.cpp6
-rw-r--r--utils/bazel/llvm-project-overlay/libc/BUILD.bazel19
-rw-r--r--utils/bazel/llvm-project-overlay/mlir/BUILD.bazel11
864 files changed, 161212 insertions, 33020 deletions
diff --git a/.ci/premerge_advisor_upload.py b/.ci/premerge_advisor_upload.py
index dda4ad2..1fc2423d 100644
--- a/.ci/premerge_advisor_upload.py
+++ b/.ci/premerge_advisor_upload.py
@@ -32,8 +32,11 @@ def main(commit_sha, workflow_run_number, build_log_files):
"platform": current_platform,
}
if test_failures:
- for name, failure_message in test_failures:
- failure_info["failures"].append({"name": name, "message": failure_message})
+ for _, failures in test_failures.items():
+ for name, failure_message in failures:
+ failure_info["failures"].append(
+ {"name": name, "message": failure_message}
+ )
else:
ninja_failures = generate_test_report_lib.find_failure_in_ninja_logs(ninja_logs)
for name, failure_message in ninja_failures:
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 4a688d0..64fb60a4 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -131,6 +131,7 @@
/mlir/test/python/ @ftynse @makslevental @stellaraccident @rolfmorel
/mlir/python/ @ftynse @makslevental @stellaraccident @rolfmorel
/mlir/lib/Bindings/Python @makslevental @rolfmorel
+/mlir/include/Bindings/Python @makslevental @rolfmorel
# MLIR Mem2Reg/SROA
/mlir/**/Transforms/Mem2Reg.* @moxinilian
diff --git a/.github/workflows/premerge.yaml b/.github/workflows/premerge.yaml
index 03c0c01..951fc16 100644
--- a/.github/workflows/premerge.yaml
+++ b/.github/workflows/premerge.yaml
@@ -62,6 +62,7 @@ jobs:
with:
fetch-depth: 2
- name: Build and Test
+ timeout-minutes: 120
continue-on-error: ${{ runner.arch == 'ARM64' }}
run: |
git config --global --add safe.directory '*'
@@ -149,6 +150,7 @@ jobs:
echo "windows-runtimes=${runtimes_to_build}" >> $GITHUB_OUTPUT
echo "windows-runtimes-check-targets=${runtimes_check_targets}" >> $GITHUB_OUTPUT
- name: Build and Test
+ timeout-minutes: 180
if: ${{ steps.vars.outputs.windows-projects != '' }}
shell: cmd
run: |
diff --git a/clang-tools-extra/clang-doc/Generators.cpp b/clang-tools-extra/clang-doc/Generators.cpp
index 3fb5b63..a5f6f1c 100644
--- a/clang-tools-extra/clang-doc/Generators.cpp
+++ b/clang-tools-extra/clang-doc/Generators.cpp
@@ -97,15 +97,11 @@ void Generator::addInfoToIndex(Index &Idx, const doc::Info *Info) {
// This anchor is used to force the linker to link in the generated object file
// and thus register the generators.
-static int LLVM_ATTRIBUTE_UNUSED YAMLGeneratorAnchorDest =
- YAMLGeneratorAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED MDGeneratorAnchorDest =
- MDGeneratorAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED HTMLGeneratorAnchorDest =
- HTMLGeneratorAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED MHTMLGeneratorAnchorDest =
+[[maybe_unused]] static int YAMLGeneratorAnchorDest = YAMLGeneratorAnchorSource;
+[[maybe_unused]] static int MDGeneratorAnchorDest = MDGeneratorAnchorSource;
+[[maybe_unused]] static int HTMLGeneratorAnchorDest = HTMLGeneratorAnchorSource;
+[[maybe_unused]] static int MHTMLGeneratorAnchorDest =
MHTMLGeneratorAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED JSONGeneratorAnchorDest =
- JSONGeneratorAnchorSource;
+[[maybe_unused]] static int JSONGeneratorAnchorDest = JSONGeneratorAnchorSource;
} // namespace doc
} // namespace clang
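The Generators.cpp hunk above and the ClangTidyForceLinker.h change that follows both touch the same "anchor" idiom, so a minimal, self-contained sketch of that pattern may help when reading them; the identifiers below (MyModuleAnchorSource/Dest) are hypothetical, not names from this patch.

// One translation unit (the module or generator being registered) defines the source:
volatile int MyModuleAnchorSource = 0;

// Another translation unit references it, which forces the linker to pull in the
// defining object file and thus run its registrations. The destination variable
// itself is never read, so it is marked [[maybe_unused]] -- the standard attribute
// that replaces LLVM_ATTRIBUTE_UNUSED throughout these diffs.
extern volatile int MyModuleAnchorSource;
[[maybe_unused]] static int MyModuleAnchorDest = MyModuleAnchorSource;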
diff --git a/clang-tools-extra/clang-tidy/ClangTidyForceLinker.h b/clang-tools-extra/clang-tidy/ClangTidyForceLinker.h
index cdf6ce2..afc358a 100644
--- a/clang-tools-extra/clang-tidy/ClangTidyForceLinker.h
+++ b/clang-tools-extra/clang-tidy/ClangTidyForceLinker.h
@@ -16,132 +16,131 @@ namespace clang::tidy {
// This anchor is used to force the linker to link the AbseilModule.
extern volatile int AbseilModuleAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED AbseilModuleAnchorDestination =
+[[maybe_unused]] static int AbseilModuleAnchorDestination =
AbseilModuleAnchorSource;
// This anchor is used to force the linker to link the AlteraModule.
extern volatile int AlteraModuleAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED AlteraModuleAnchorDestination =
+[[maybe_unused]] static int AlteraModuleAnchorDestination =
AlteraModuleAnchorSource;
// This anchor is used to force the linker to link the AndroidModule.
extern volatile int AndroidModuleAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED AndroidModuleAnchorDestination =
+[[maybe_unused]] static int AndroidModuleAnchorDestination =
AndroidModuleAnchorSource;
// This anchor is used to force the linker to link the BoostModule.
extern volatile int BoostModuleAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED BoostModuleAnchorDestination =
+[[maybe_unused]] static int BoostModuleAnchorDestination =
BoostModuleAnchorSource;
// This anchor is used to force the linker to link the BugproneModule.
extern volatile int BugproneModuleAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED BugproneModuleAnchorDestination =
+[[maybe_unused]] static int BugproneModuleAnchorDestination =
BugproneModuleAnchorSource;
// This anchor is used to force the linker to link the CERTModule.
extern volatile int CERTModuleAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED CERTModuleAnchorDestination =
+[[maybe_unused]] static int CERTModuleAnchorDestination =
CERTModuleAnchorSource;
// This anchor is used to force the linker to link the ConcurrencyModule.
extern volatile int ConcurrencyModuleAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED ConcurrencyModuleAnchorDestination =
+[[maybe_unused]] static int ConcurrencyModuleAnchorDestination =
ConcurrencyModuleAnchorSource;
// This anchor is used to force the linker to link the CppCoreGuidelinesModule.
extern volatile int CppCoreGuidelinesModuleAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED CppCoreGuidelinesModuleAnchorDestination =
+[[maybe_unused]] static int CppCoreGuidelinesModuleAnchorDestination =
CppCoreGuidelinesModuleAnchorSource;
#if CLANG_TIDY_ENABLE_QUERY_BASED_CUSTOM_CHECKS
// This anchor is used to force the linker to link the CustomModule.
extern volatile int CustomModuleAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED CustomModuleAnchorDestination =
+[[maybe_unused]] static int CustomModuleAnchorDestination =
CustomModuleAnchorSource;
#endif
// This anchor is used to force the linker to link the DarwinModule.
extern volatile int DarwinModuleAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED DarwinModuleAnchorDestination =
+[[maybe_unused]] static int DarwinModuleAnchorDestination =
DarwinModuleAnchorSource;
// This anchor is used to force the linker to link the FuchsiaModule.
extern volatile int FuchsiaModuleAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED FuchsiaModuleAnchorDestination =
+[[maybe_unused]] static int FuchsiaModuleAnchorDestination =
FuchsiaModuleAnchorSource;
// This anchor is used to force the linker to link the GoogleModule.
extern volatile int GoogleModuleAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED GoogleModuleAnchorDestination =
+[[maybe_unused]] static int GoogleModuleAnchorDestination =
GoogleModuleAnchorSource;
// This anchor is used to force the linker to link the HICPPModule.
extern volatile int HICPPModuleAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED HICPPModuleAnchorDestination =
+[[maybe_unused]] static int HICPPModuleAnchorDestination =
HICPPModuleAnchorSource;
// This anchor is used to force the linker to link the LinuxKernelModule.
extern volatile int LinuxKernelModuleAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED LinuxKernelModuleAnchorDestination =
+[[maybe_unused]] static int LinuxKernelModuleAnchorDestination =
LinuxKernelModuleAnchorSource;
// This anchor is used to force the linker to link the LLVMModule.
extern volatile int LLVMModuleAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED LLVMModuleAnchorDestination =
+[[maybe_unused]] static int LLVMModuleAnchorDestination =
LLVMModuleAnchorSource;
// This anchor is used to force the linker to link the LLVMLibcModule.
extern volatile int LLVMLibcModuleAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED LLVMLibcModuleAnchorDestination =
+[[maybe_unused]] static int LLVMLibcModuleAnchorDestination =
LLVMLibcModuleAnchorSource;
// This anchor is used to force the linker to link the MiscModule.
extern volatile int MiscModuleAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED MiscModuleAnchorDestination =
+[[maybe_unused]] static int MiscModuleAnchorDestination =
MiscModuleAnchorSource;
// This anchor is used to force the linker to link the ModernizeModule.
extern volatile int ModernizeModuleAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED ModernizeModuleAnchorDestination =
+[[maybe_unused]] static int ModernizeModuleAnchorDestination =
ModernizeModuleAnchorSource;
#if CLANG_TIDY_ENABLE_STATIC_ANALYZER && \
!defined(CLANG_TIDY_DISABLE_STATIC_ANALYZER_CHECKS)
// This anchor is used to force the linker to link the MPIModule.
extern volatile int MPIModuleAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED MPIModuleAnchorDestination =
- MPIModuleAnchorSource;
+[[maybe_unused]] static int MPIModuleAnchorDestination = MPIModuleAnchorSource;
#endif
// This anchor is used to force the linker to link the ObjCModule.
extern volatile int ObjCModuleAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED ObjCModuleAnchorDestination =
+[[maybe_unused]] static int ObjCModuleAnchorDestination =
ObjCModuleAnchorSource;
// This anchor is used to force the linker to link the OpenMPModule.
extern volatile int OpenMPModuleAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED OpenMPModuleAnchorDestination =
+[[maybe_unused]] static int OpenMPModuleAnchorDestination =
OpenMPModuleAnchorSource;
// This anchor is used to force the linker to link the PerformanceModule.
extern volatile int PerformanceModuleAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED PerformanceModuleAnchorDestination =
+[[maybe_unused]] static int PerformanceModuleAnchorDestination =
PerformanceModuleAnchorSource;
// This anchor is used to force the linker to link the PortabilityModule.
extern volatile int PortabilityModuleAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED PortabilityModuleAnchorDestination =
+[[maybe_unused]] static int PortabilityModuleAnchorDestination =
PortabilityModuleAnchorSource;
// This anchor is used to force the linker to link the ReadabilityModule.
extern volatile int ReadabilityModuleAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED ReadabilityModuleAnchorDestination =
+[[maybe_unused]] static int ReadabilityModuleAnchorDestination =
ReadabilityModuleAnchorSource;
// This anchor is used to force the linker to link the ZirconModule.
extern volatile int ZirconModuleAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED ZirconModuleAnchorDestination =
+[[maybe_unused]] static int ZirconModuleAnchorDestination =
ZirconModuleAnchorSource;
} // namespace clang::tidy
diff --git a/clang-tools-extra/clang-tidy/abseil/RedundantStrcatCallsCheck.cpp b/clang-tools-extra/clang-tidy/abseil/RedundantStrcatCallsCheck.cpp
index d7cc0ca..a58c041 100644
--- a/clang-tools-extra/clang-tidy/abseil/RedundantStrcatCallsCheck.cpp
+++ b/clang-tools-extra/clang-tidy/abseil/RedundantStrcatCallsCheck.cpp
@@ -45,7 +45,10 @@ struct StrCatCheckResult {
std::vector<FixItHint> Hints;
};
-void removeCallLeaveArgs(const CallExpr *Call, StrCatCheckResult *CheckResult) {
+} // namespace
+
+static void removeCallLeaveArgs(const CallExpr *Call,
+ StrCatCheckResult *CheckResult) {
if (Call->getNumArgs() == 0)
return;
// Remove 'Foo('
@@ -58,9 +61,9 @@ void removeCallLeaveArgs(const CallExpr *Call, StrCatCheckResult *CheckResult) {
Call->getRParenLoc(), Call->getEndLoc().getLocWithOffset(1))));
}
-const clang::CallExpr *processArgument(const Expr *Arg,
- const MatchFinder::MatchResult &Result,
- StrCatCheckResult *CheckResult) {
+static const clang::CallExpr *
+processArgument(const Expr *Arg, const MatchFinder::MatchResult &Result,
+ StrCatCheckResult *CheckResult) {
const auto IsAlphanum = hasDeclaration(cxxMethodDecl(hasName("AlphaNum")));
static const auto *const Strcat = new auto(hasName("::absl::StrCat"));
const auto IsStrcat = cxxBindTemporaryExpr(
@@ -78,8 +81,8 @@ const clang::CallExpr *processArgument(const Expr *Arg,
return nullptr;
}
-StrCatCheckResult processCall(const CallExpr *RootCall, bool IsAppend,
- const MatchFinder::MatchResult &Result) {
+static StrCatCheckResult processCall(const CallExpr *RootCall, bool IsAppend,
+ const MatchFinder::MatchResult &Result) {
StrCatCheckResult CheckResult;
std::deque<const CallExpr *> CallsToProcess = {RootCall};
@@ -101,7 +104,6 @@ StrCatCheckResult processCall(const CallExpr *RootCall, bool IsAppend,
}
return CheckResult;
}
-} // namespace
void RedundantStrcatCallsCheck::check(const MatchFinder::MatchResult &Result) {
bool IsAppend = false;
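This file and the next several clang-tidy checks apply the same refactor: file-local helper functions move out of the anonymous namespace and become static free functions, while AST matchers and local types stay inside the namespace. A hedged sketch of the resulting shape, using a hypothetical helper rather than code from the patch:

// Types and AST matchers remain in the anonymous namespace.
namespace {
struct CheckResult {
  int Count = 0;
};
} // namespace

// Helpers that used to live inside `namespace { ... }` are now spelled as
// static functions instead; both forms give internal linkage, but the static
// spelling keeps the anonymous namespace small and the linkage explicit.
static CheckResult summarizeArgs(int NumArgs) {
  CheckResult Result;
  Result.Count = NumArgs;
  return Result;
}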
diff --git a/clang-tools-extra/clang-tidy/bugprone/MultipleStatementMacroCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/MultipleStatementMacroCheck.cpp
index 390f3dd..54ed899 100644
--- a/clang-tools-extra/clang-tidy/bugprone/MultipleStatementMacroCheck.cpp
+++ b/clang-tools-extra/clang-tidy/bugprone/MultipleStatementMacroCheck.cpp
@@ -18,8 +18,11 @@ namespace {
AST_MATCHER(Expr, isInMacro) { return Node.getBeginLoc().isMacroID(); }
+} // namespace
+
/// Find the next statement after `S`.
-const Stmt *nextStmt(const MatchFinder::MatchResult &Result, const Stmt *S) {
+static const Stmt *nextStmt(const MatchFinder::MatchResult &Result,
+ const Stmt *S) {
auto Parents = Result.Context->getParents(*S);
if (Parents.empty())
return nullptr;
@@ -40,8 +43,8 @@ using ExpansionRanges = std::vector<SourceRange>;
/// \brief Get all the macro expansion ranges related to `Loc`.
///
/// The result is ordered from most inner to most outer.
-ExpansionRanges getExpansionRanges(SourceLocation Loc,
- const MatchFinder::MatchResult &Result) {
+static ExpansionRanges
+getExpansionRanges(SourceLocation Loc, const MatchFinder::MatchResult &Result) {
ExpansionRanges Locs;
while (Loc.isMacroID()) {
Locs.push_back(
@@ -51,8 +54,6 @@ ExpansionRanges getExpansionRanges(SourceLocation Loc,
return Locs;
}
-} // namespace
-
void MultipleStatementMacroCheck::registerMatchers(MatchFinder *Finder) {
const auto Inner = expr(isInMacro(), unless(compoundStmt())).bind("inner");
Finder->addMatcher(
diff --git a/clang-tools-extra/clang-tidy/cppcoreguidelines/ProTypeMemberInitCheck.cpp b/clang-tools-extra/clang-tidy/cppcoreguidelines/ProTypeMemberInitCheck.cpp
index 37d737a..1ac9b8b 100644
--- a/clang-tools-extra/clang-tidy/cppcoreguidelines/ProTypeMemberInitCheck.cpp
+++ b/clang-tools-extra/clang-tidy/cppcoreguidelines/ProTypeMemberInitCheck.cpp
@@ -28,10 +28,13 @@ AST_MATCHER(CXXRecordDecl, hasDefaultConstructor) {
return Node.hasDefaultConstructor();
}
+} // namespace
+
// Iterate over all the fields in a record type, both direct and indirect (e.g.
// if the record contains an anonymous struct).
template <typename T, typename Func>
-void forEachField(const RecordDecl &Record, const T &Fields, const Func &Fn) {
+static void forEachField(const RecordDecl &Record, const T &Fields,
+ const Func &Fn) {
for (const FieldDecl *F : Fields) {
if (F->isAnonymousStructOrUnion()) {
if (const CXXRecordDecl *R = F->getType()->getAsCXXRecordDecl())
@@ -43,8 +46,9 @@ void forEachField(const RecordDecl &Record, const T &Fields, const Func &Fn) {
}
template <typename T, typename Func>
-void forEachFieldWithFilter(const RecordDecl &Record, const T &Fields,
- bool &AnyMemberHasInitPerUnion, const Func &Fn) {
+static void forEachFieldWithFilter(const RecordDecl &Record, const T &Fields,
+ bool &AnyMemberHasInitPerUnion,
+ const Func &Fn) {
for (const FieldDecl *F : Fields) {
if (F->isAnonymousStructOrUnion()) {
if (const CXXRecordDecl *R = F->getType()->getAsCXXRecordDecl()) {
@@ -59,8 +63,9 @@ void forEachFieldWithFilter(const RecordDecl &Record, const T &Fields,
}
}
-void removeFieldInitialized(const FieldDecl *M,
- SmallPtrSetImpl<const FieldDecl *> &FieldDecls) {
+static void
+removeFieldInitialized(const FieldDecl *M,
+ SmallPtrSetImpl<const FieldDecl *> &FieldDecls) {
const RecordDecl *R = M->getParent();
if (R && R->isUnion()) {
// Erase all members in a union if any member of it is initialized.
@@ -70,9 +75,9 @@ void removeFieldInitialized(const FieldDecl *M,
FieldDecls.erase(M);
}
-void removeFieldsInitializedInBody(
- const Stmt &Stmt, ASTContext &Context,
- SmallPtrSetImpl<const FieldDecl *> &FieldDecls) {
+static void
+removeFieldsInitializedInBody(const Stmt &Stmt, ASTContext &Context,
+ SmallPtrSetImpl<const FieldDecl *> &FieldDecls) {
auto Matches =
match(findAll(binaryOperator(
hasOperatorName("="),
@@ -82,9 +87,9 @@ void removeFieldsInitializedInBody(
removeFieldInitialized(Match.getNodeAs<FieldDecl>("fieldDecl"), FieldDecls);
}
-StringRef getName(const FieldDecl *Field) { return Field->getName(); }
+static StringRef getName(const FieldDecl *Field) { return Field->getName(); }
-StringRef getName(const RecordDecl *Record) {
+static StringRef getName(const RecordDecl *Record) {
// Get the typedef name if this is a C-style anonymous struct and typedef.
if (const TypedefNameDecl *Typedef = Record->getTypedefNameForAnonDecl())
return Typedef->getName();
@@ -94,7 +99,7 @@ StringRef getName(const RecordDecl *Record) {
// Creates comma separated list of decls requiring initialization in order of
// declaration.
template <typename R, typename T>
-std::string
+static std::string
toCommaSeparatedString(const R &OrderedDecls,
const SmallPtrSetImpl<const T *> &DeclsToInit) {
SmallVector<StringRef, 16> Names;
@@ -105,12 +110,14 @@ toCommaSeparatedString(const R &OrderedDecls,
return llvm::join(Names.begin(), Names.end(), ", ");
}
-SourceLocation getLocationForEndOfToken(const ASTContext &Context,
- SourceLocation Location) {
+static SourceLocation getLocationForEndOfToken(const ASTContext &Context,
+ SourceLocation Location) {
return Lexer::getLocForEndOfToken(Location, 0, Context.getSourceManager(),
Context.getLangOpts());
}
+namespace {
+
// There are 3 kinds of insertion placements:
enum class InitializerPlacement {
// 1. The fields are inserted after an existing CXXCtorInitializer stored in
@@ -187,15 +194,17 @@ struct InitializerInsertion {
SmallVector<std::string, 4> Initializers;
};
+} // namespace
+
// Convenience utility to get a RecordDecl from a QualType.
-const RecordDecl *getCanonicalRecordDecl(const QualType &Type) {
+static const RecordDecl *getCanonicalRecordDecl(const QualType &Type) {
if (const auto *RT = Type->getAsCanonical<RecordType>())
return RT->getDecl();
return nullptr;
}
template <typename R, typename T>
-SmallVector<InitializerInsertion, 16>
+static SmallVector<InitializerInsertion, 16>
computeInsertions(const CXXConstructorDecl::init_const_range &Inits,
const R &OrderedDecls,
const SmallPtrSetImpl<const T *> &DeclsToInit) {
@@ -239,8 +248,9 @@ computeInsertions(const CXXConstructorDecl::init_const_range &Inits,
// Gets the list of bases and members that could possibly be initialized, in
// order as they appear in the class declaration.
-void getInitializationsInOrder(const CXXRecordDecl &ClassDecl,
- SmallVectorImpl<const NamedDecl *> &Decls) {
+static void
+getInitializationsInOrder(const CXXRecordDecl &ClassDecl,
+ SmallVectorImpl<const NamedDecl *> &Decls) {
Decls.clear();
for (const auto &Base : ClassDecl.bases()) {
// Decl may be null if the base class is a template parameter.
@@ -253,9 +263,10 @@ void getInitializationsInOrder(const CXXRecordDecl &ClassDecl,
}
template <typename T>
-void fixInitializerList(const ASTContext &Context, DiagnosticBuilder &Diag,
- const CXXConstructorDecl *Ctor,
- const SmallPtrSetImpl<const T *> &DeclsToInit) {
+static void fixInitializerList(const ASTContext &Context,
+ DiagnosticBuilder &Diag,
+ const CXXConstructorDecl *Ctor,
+ const SmallPtrSetImpl<const T *> &DeclsToInit) {
// Do not propose fixes in macros since we cannot place them correctly.
if (Ctor->getBeginLoc().isMacroID())
return;
@@ -271,8 +282,6 @@ void fixInitializerList(const ASTContext &Context, DiagnosticBuilder &Diag,
}
}
-} // anonymous namespace
-
ProTypeMemberInitCheck::ProTypeMemberInitCheck(StringRef Name,
ClangTidyContext *Context)
: ClangTidyCheck(Name, Context),
diff --git a/clang-tools-extra/clang-tidy/misc/CoroutineHostileRAIICheck.cpp b/clang-tools-extra/clang-tidy/misc/CoroutineHostileRAIICheck.cpp
index 8ec7695..3b9b8e0 100644
--- a/clang-tools-extra/clang-tidy/misc/CoroutineHostileRAIICheck.cpp
+++ b/clang-tools-extra/clang-tidy/misc/CoroutineHostileRAIICheck.cpp
@@ -60,12 +60,12 @@ AST_MATCHER_P(CoawaitExpr, awaitable, ast_matchers::internal::Matcher<Expr>,
return InnerMatcher.matches(*E, Finder, Builder);
return false;
}
+} // namespace
-auto typeWithNameIn(const std::vector<StringRef> &Names) {
+static auto typeWithNameIn(const std::vector<StringRef> &Names) {
return hasType(
hasCanonicalType(hasDeclaration(namedDecl(hasAnyName(Names)))));
}
-} // namespace
CoroutineHostileRAIICheck::CoroutineHostileRAIICheck(StringRef Name,
ClangTidyContext *Context)
diff --git a/clang-tools-extra/clang-tidy/misc/NoRecursionCheck.cpp b/clang-tools-extra/clang-tidy/misc/NoRecursionCheck.cpp
index 0d7667c..035598d 100644
--- a/clang-tools-extra/clang-tidy/misc/NoRecursionCheck.cpp
+++ b/clang-tools-extra/clang-tidy/misc/NoRecursionCheck.cpp
@@ -151,10 +151,12 @@ constexpr unsigned SmallSCCSize = 32;
using CallStackTy =
llvm::SmallVector<CallGraphNode::CallRecord, SmallCallStackSize>;
+} // namespace
+
// In given SCC, find *some* call stack that will be cyclic.
// This will only find *one* such stack, it might not be the smallest one,
// and there may be other loops.
-CallStackTy pathfindSomeCycle(ArrayRef<CallGraphNode *> SCC) {
+static CallStackTy pathfindSomeCycle(ArrayRef<CallGraphNode *> SCC) {
// We'll need to be able to performantly look up whether some CallGraphNode
// is in SCC or not, so cache all the SCC elements in a set.
const ImmutableSmallSet<CallGraphNode *, SmallSCCSize> SCCElts(SCC);
@@ -190,8 +192,6 @@ CallStackTy pathfindSomeCycle(ArrayRef<CallGraphNode *> SCC) {
return CallStack;
}
-} // namespace
-
void NoRecursionCheck::registerMatchers(MatchFinder *Finder) {
Finder->addMatcher(translationUnitDecl().bind("TUDecl"), this);
}
diff --git a/clang-tools-extra/clang-tidy/readability/UppercaseLiteralSuffixCheck.cpp b/clang-tools-extra/clang-tidy/readability/UppercaseLiteralSuffixCheck.cpp
index c1dc209..740a68d 100644
--- a/clang-tools-extra/clang-tidy/readability/UppercaseLiteralSuffixCheck.cpp
+++ b/clang-tools-extra/clang-tidy/readability/UppercaseLiteralSuffixCheck.cpp
@@ -55,8 +55,10 @@ struct NewSuffix {
std::optional<FixItHint> FixIt;
};
-std::optional<SourceLocation> getMacroAwareLocation(SourceLocation Loc,
- const SourceManager &SM) {
+} // namespace
+
+static std::optional<SourceLocation>
+getMacroAwareLocation(SourceLocation Loc, const SourceManager &SM) {
// Do nothing if the provided location is invalid.
if (Loc.isInvalid())
return std::nullopt;
@@ -67,8 +69,8 @@ std::optional<SourceLocation> getMacroAwareLocation(SourceLocation Loc,
return SpellingLoc;
}
-std::optional<SourceRange> getMacroAwareSourceRange(SourceRange Loc,
- const SourceManager &SM) {
+static std::optional<SourceRange>
+getMacroAwareSourceRange(SourceRange Loc, const SourceManager &SM) {
std::optional<SourceLocation> Begin =
getMacroAwareLocation(Loc.getBegin(), SM);
std::optional<SourceLocation> End = getMacroAwareLocation(Loc.getEnd(), SM);
@@ -77,7 +79,7 @@ std::optional<SourceRange> getMacroAwareSourceRange(SourceRange Loc,
return SourceRange(*Begin, *End);
}
-std::optional<std::string>
+static std::optional<std::string>
getNewSuffix(llvm::StringRef OldSuffix,
const std::vector<StringRef> &NewSuffixes) {
// If there is no config, just uppercase the entirety of the suffix.
@@ -96,7 +98,7 @@ getNewSuffix(llvm::StringRef OldSuffix,
}
template <typename LiteralType>
-std::optional<NewSuffix>
+static std::optional<NewSuffix>
shouldReplaceLiteralSuffix(const Expr &Literal,
const std::vector<StringRef> &NewSuffixes,
const SourceManager &SM, const LangOptions &LO) {
@@ -174,8 +176,6 @@ shouldReplaceLiteralSuffix(const Expr &Literal,
return ReplacementDsc;
}
-} // namespace
-
UppercaseLiteralSuffixCheck::UppercaseLiteralSuffixCheck(
StringRef Name, ClangTidyContext *Context)
: ClangTidyCheck(Name, Context),
diff --git a/clang-tools-extra/clang-tidy/utils/DeclRefExprUtils.cpp b/clang-tools-extra/clang-tidy/utils/DeclRefExprUtils.cpp
index 57453ad..a5b0883 100644
--- a/clang-tools-extra/clang-tidy/utils/DeclRefExprUtils.cpp
+++ b/clang-tools-extra/clang-tidy/utils/DeclRefExprUtils.cpp
@@ -19,9 +19,8 @@ namespace clang::tidy::utils::decl_ref_expr {
using namespace ::clang::ast_matchers;
using llvm::SmallPtrSet;
-namespace {
-
-template <typename S> bool isSetDifferenceEmpty(const S &S1, const S &S2) {
+template <typename S>
+static bool isSetDifferenceEmpty(const S &S1, const S &S2) {
for (auto E : S1)
if (S2.count(E) == 0)
return false;
@@ -30,15 +29,15 @@ template <typename S> bool isSetDifferenceEmpty(const S &S1, const S &S2) {
// Extracts all Nodes keyed by ID from Matches and inserts them into Nodes.
template <typename Node>
-void extractNodesByIdTo(ArrayRef<BoundNodes> Matches, StringRef ID,
- SmallPtrSet<const Node *, 16> &Nodes) {
+static void extractNodesByIdTo(ArrayRef<BoundNodes> Matches, StringRef ID,
+ SmallPtrSet<const Node *, 16> &Nodes) {
for (const auto &Match : Matches)
Nodes.insert(Match.getNodeAs<Node>(ID));
}
// Returns true if both types refer to the same type,
// ignoring the const-qualifier.
-bool isSameTypeIgnoringConst(QualType A, QualType B) {
+static bool isSameTypeIgnoringConst(QualType A, QualType B) {
A = A.getCanonicalType();
B = B.getCanonicalType();
A.addConst();
@@ -47,7 +46,8 @@ bool isSameTypeIgnoringConst(QualType A, QualType B) {
}
// Returns true if `D` and `O` have the same parameter types.
-bool hasSameParameterTypes(const CXXMethodDecl &D, const CXXMethodDecl &O) {
+static bool hasSameParameterTypes(const CXXMethodDecl &D,
+ const CXXMethodDecl &O) {
if (D.getNumParams() != O.getNumParams())
return false;
for (int I = 0, E = D.getNumParams(); I < E; ++I) {
@@ -60,7 +60,7 @@ bool hasSameParameterTypes(const CXXMethodDecl &D, const CXXMethodDecl &O) {
// If `D` has a const-qualified overload with otherwise identical
// ref-qualifiers and parameter types, returns that overload.
-const CXXMethodDecl *findConstOverload(const CXXMethodDecl &D) {
+static const CXXMethodDecl *findConstOverload(const CXXMethodDecl &D) {
assert(!D.isConst());
DeclContext::lookup_result LookupResult =
@@ -81,7 +81,7 @@ const CXXMethodDecl *findConstOverload(const CXXMethodDecl &D) {
// Returns true if both types are pointers or reference to the same type,
// ignoring the const-qualifier.
-bool pointsToSameTypeIgnoringConst(QualType A, QualType B) {
+static bool pointsToSameTypeIgnoringConst(QualType A, QualType B) {
assert(A->isPointerType() || A->isReferenceType());
assert(B->isPointerType() || B->isReferenceType());
return isSameTypeIgnoringConst(A->getPointeeType(), B->getPointeeType());
@@ -122,7 +122,7 @@ bool pointsToSameTypeIgnoringConst(QualType A, QualType B) {
//
// This function checks (A) and (B), but the caller should make sure that the
// object is not mutated through the return value.
-bool isLikelyShallowConst(const CXXMethodDecl &M) {
+static bool isLikelyShallowConst(const CXXMethodDecl &M) {
assert(!M.isConst());
// The method can mutate our variable.
@@ -146,6 +146,8 @@ bool isLikelyShallowConst(const CXXMethodDecl &M) {
return isSameTypeIgnoringConst(CallTy, OverloadTy);
}
+namespace {
+
// A matcher that matches DeclRefExprs that are used in ways such that the
// underlying declaration is not modified.
// If the declaration is of pointer type, `Indirections` specifies the level
diff --git a/clang-tools-extra/clang-tidy/utils/DesignatedInitializers.cpp b/clang-tools-extra/clang-tidy/utils/DesignatedInitializers.cpp
index 044f89b..b068ae2 100644
--- a/clang-tools-extra/clang-tidy/utils/DesignatedInitializers.cpp
+++ b/clang-tools-extra/clang-tidy/utils/DesignatedInitializers.cpp
@@ -19,8 +19,6 @@
namespace clang::tidy::utils {
-namespace {
-
/// Returns true if Name is reserved, like _Foo or __Vector_base.
static inline bool isReservedName(llvm::StringRef Name) {
// This doesn't catch all cases, but the most common.
@@ -28,6 +26,8 @@ static inline bool isReservedName(llvm::StringRef Name) {
(isUppercase(Name[1]) || Name[1] == '_');
}
+namespace {
+
// Helper class to iterate over the designator names of an aggregate type.
//
// For an array type, yields [0], [1], [2]...
@@ -112,6 +112,8 @@ private:
RecordDecl::field_iterator FieldsEnd;
};
+} // namespace
+
// Collect designator labels describing the elements of an init list.
//
// This function contributes the designators of some (sub)object, which is
@@ -127,10 +129,9 @@ private:
// '.a:' is produced directly without recursing into the written sublist.
// (The written sublist will have a separate collectDesignators() call later).
// Recursion with Prefix='.b' and Sem = {3, ImplicitValue} produces '.b.x:'.
-void collectDesignators(const InitListExpr *Sem,
- llvm::DenseMap<SourceLocation, std::string> &Out,
- const llvm::DenseSet<SourceLocation> &NestedBraces,
- std::string &Prefix) {
+static void collectDesignators(
+ const InitListExpr *Sem, llvm::DenseMap<SourceLocation, std::string> &Out,
+ const llvm::DenseSet<SourceLocation> &NestedBraces, std::string &Prefix) {
if (!Sem || Sem->isTransparent())
return;
assert(Sem->isSemanticForm());
@@ -170,8 +171,6 @@ void collectDesignators(const InitListExpr *Sem,
}
}
-} // namespace
-
llvm::DenseMap<SourceLocation, std::string>
getUnwrittenDesignators(const InitListExpr *Syn) {
assert(Syn->isSyntacticForm());
diff --git a/clang-tools-extra/clangd/FindTarget.cpp b/clang-tools-extra/clangd/FindTarget.cpp
index ce79f88..f80f732 100644
--- a/clang-tools-extra/clangd/FindTarget.cpp
+++ b/clang-tools-extra/clangd/FindTarget.cpp
@@ -50,7 +50,7 @@ namespace clang {
namespace clangd {
namespace {
-LLVM_ATTRIBUTE_UNUSED std::string nodeToString(const DynTypedNode &N) {
+[[maybe_unused]] std::string nodeToString(const DynTypedNode &N) {
std::string S = std::string(N.getNodeKind().asStringRef());
{
llvm::raw_string_ostream OS(S);
diff --git a/clang-tools-extra/clangd/unittests/FileDistanceTests.cpp b/clang-tools-extra/clangd/unittests/FileDistanceTests.cpp
index 3003582..aed3400 100644
--- a/clang-tools-extra/clangd/unittests/FileDistanceTests.cpp
+++ b/clang-tools-extra/clangd/unittests/FileDistanceTests.cpp
@@ -58,7 +58,7 @@ TEST(FileDistanceTests, BadSource) {
}
// Force the unittest URI scheme to be linked,
-static int LLVM_ATTRIBUTE_UNUSED UseUnittestScheme = UnittestSchemeAnchorSource;
+[[maybe_unused]] static int UseUnittestScheme = UnittestSchemeAnchorSource;
TEST(FileDistanceTests, URI) {
FileDistanceOptions Opts;
diff --git a/clang-tools-extra/clangd/unittests/QualityTests.cpp b/clang-tools-extra/clangd/unittests/QualityTests.cpp
index 4954659..879a179 100644
--- a/clang-tools-extra/clangd/unittests/QualityTests.cpp
+++ b/clang-tools-extra/clangd/unittests/QualityTests.cpp
@@ -33,7 +33,7 @@ namespace clang {
namespace clangd {
// Force the unittest URI scheme to be linked,
-static int LLVM_ATTRIBUTE_UNUSED UnittestSchemeAnchorDest =
+[[maybe_unused]] static int UnittestSchemeAnchorDest =
UnittestSchemeAnchorSource;
namespace {
diff --git a/clang-tools-extra/clangd/unittests/URITests.cpp b/clang-tools-extra/clangd/unittests/URITests.cpp
index 99d59b6..c0ccfc5 100644
--- a/clang-tools-extra/clangd/unittests/URITests.cpp
+++ b/clang-tools-extra/clangd/unittests/URITests.cpp
@@ -16,7 +16,7 @@ namespace clang {
namespace clangd {
// Force the unittest URI scheme to be linked,
-static int LLVM_ATTRIBUTE_UNUSED UnittestSchemeAnchorDest =
+[[maybe_unused]] static int UnittestSchemeAnchorDest =
UnittestSchemeAnchorSource;
namespace {
diff --git a/clang/docs/ClangFormatStyleOptions.rst b/clang/docs/ClangFormatStyleOptions.rst
index b746df5..570cab2 100644
--- a/clang/docs/ClangFormatStyleOptions.rst
+++ b/clang/docs/ClangFormatStyleOptions.rst
@@ -245,7 +245,7 @@ the configuration (without a prefix: ``Auto``).
.. note::
This currently only applies to braced initializer lists (when
- ``Cpp11BracedListStyle`` is ``true``) and parentheses.
+ ``Cpp11BracedListStyle`` is not ``Block``) and parentheses.
@@ -3816,29 +3816,72 @@ the configuration (without a prefix: ``Auto``).
.. _Cpp11BracedListStyle:
-**Cpp11BracedListStyle** (``Boolean``) :versionbadge:`clang-format 3.4` :ref:`¶ <Cpp11BracedListStyle>`
- If ``true``, format braced lists as best suited for C++11 braced
- lists.
+**Cpp11BracedListStyle** (``BracedListStyle``) :versionbadge:`clang-format 3.4` :ref:`¶ <Cpp11BracedListStyle>`
+ The style to handle braced lists.
- Important differences:
+ Possible values:
- * No spaces inside the braced list.
- * No line break before the closing brace.
- * Indentation with the continuation indent, not with the block indent.
+ * ``BLS_Block`` (in configuration: ``Block``)
+ Best suited for pre C++11 braced lists.
- Fundamentally, C++11 braced lists are formatted exactly like function
- calls would be formatted in their place. If the braced list follows a name
- (e.g. a type or variable name), clang-format formats as if the ``{}`` were
- the parentheses of a function call with that name. If there is no name,
- a zero-length name is assumed.
+ * Spaces inside the braced list.
+ * Line break before the closing brace.
+ * Indentation with the block indent.
+
+
+ .. code-block:: c++
+
+ vector<int> x{ 1, 2, 3, 4 };
+ vector<T> x{ {}, {}, {}, {} };
+ f(MyMap[{ composite, key }]);
+ new int[3]{ 1, 2, 3 };
+ Type name{ // Comment
+ value
+ };
+
+ * ``BLS_FunctionCall`` (in configuration: ``FunctionCall``)
+ Best suited for C++11 braced lists.
+
+ * No spaces inside the braced list.
+ * No line break before the closing brace.
+ * Indentation with the continuation indent.
+
+ Fundamentally, C++11 braced lists are formatted exactly like function
+ calls would be formatted in their place. If the braced list follows a
+ name (e.g. a type or variable name), clang-format formats as if the
+ ``{}`` were the parentheses of a function call with that name. If there
+ is no name, a zero-length name is assumed.
+
+ .. code-block:: c++
+
+ vector<int> x{1, 2, 3, 4};
+ vector<T> x{{}, {}, {}, {}};
+ f(MyMap[{composite, key}]);
+ new int[3]{1, 2, 3};
+ Type name{ // Comment
+ value};
+
+ * ``BLS_AlignFirstComment`` (in configuration: ``AlignFirstComment``)
+ Same as ``FunctionCall``, except for the handling of a comment at the
+ beginning; it then aligns everything following with the comment.
+
+ * No spaces inside the braced list. (Even for a comment at the first
+ position.)
+ * No line break before the closing brace.
+ * Indentation with the continuation indent, except when followed by a
+ line comment, then it uses the block indent.
+
+
+ .. code-block:: c++
+
+ vector<int> x{1, 2, 3, 4};
+ vector<T> x{{}, {}, {}, {}};
+ f(MyMap[{composite, key}]);
+ new int[3]{1, 2, 3};
+ Type name{// Comment
+ value};
- .. code-block:: c++
- true: false:
- vector<int> x{1, 2, 3, 4}; vs. vector<int> x{ 1, 2, 3, 4 };
- vector<T> x{{}, {}, {}, {}}; vector<T> x{ {}, {}, {}, {} };
- f(MyMap[{composite, key}]); f(MyMap[{ composite, key }]);
- new int[3]{1, 2, 3}; new int[3]{ 1, 2, 3 };
.. _DeriveLineEnding:
@@ -6625,7 +6668,7 @@ the configuration (without a prefix: ``Auto``).
.. note::
This option doesn't apply to initializer braces if
- ``Cpp11BracedListStyle`` is set to ``true``.
+ ``Cpp11BracedListStyle`` is not ``Block``.
Possible values:
diff --git a/clang/docs/InternalsManual.rst b/clang/docs/InternalsManual.rst
index c677ddfa..eff46ab 100644
--- a/clang/docs/InternalsManual.rst
+++ b/clang/docs/InternalsManual.rst
@@ -10,7 +10,7 @@ Introduction
This document describes some of the more important APIs and internal design
decisions made in the Clang C front-end. The purpose of this document is to
-both capture some of this high level information and also describe some of the
+both capture some of this high-level information and also describe some of the
design decisions behind it. This is meant for people interested in hacking on
Clang, not for end-users. The description below is categorized by libraries,
and does not describe any of the clients of the libraries.
@@ -20,7 +20,7 @@ LLVM Support Library
The LLVM ``libSupport`` library provides many underlying libraries and
`data-structures <https://llvm.org/docs/ProgrammersManual.html>`_, including
-command line option processing, various containers and a system abstraction
+command line option processing, various containers, and a system abstraction
layer, which is used for file system access.
The Clang "Basic" Library
@@ -34,7 +34,7 @@ and information about the subset of the language being compiled for.
Part of this infrastructure is specific to C (such as the ``TargetInfo``
class), other parts could be reused for other non-C-based languages
(``SourceLocation``, ``SourceManager``, ``Diagnostics``, ``FileManager``).
-When and if there is future demand we can figure out if it makes sense to
+When and if there is future demand, we can figure out if it makes sense to
introduce a new library, move the general classes somewhere else, or introduce
some other solution.
@@ -96,7 +96,7 @@ The ``EXTENSION`` and ``EXTWARN`` severities are used for extensions to the
language that Clang accepts. This means that Clang fully understands and can
represent them in the AST, but we produce diagnostics to tell the user their
code is non-portable. The difference is that the former are ignored by
-default, and the later warn by default. The ``WARNING`` severity is used for
+default, and the latter warn by default. The ``WARNING`` severity is used for
constructs that are valid in the currently selected source language but that
are dubious in some way. The ``REMARK`` severity provides generic information
about the compilation that is not necessarily related to any dubious code. The
@@ -106,7 +106,7 @@ These *severities* are mapped into a smaller set (the ``Diagnostic::Level``
enum, {``Ignored``, ``Note``, ``Remark``, ``Warning``, ``Error``, ``Fatal``}) of
output
*levels* by the diagnostics subsystem based on various configuration options.
-Clang internally supports a fully fine grained mapping mechanism that allows
+Clang internally supports a fully fine-grained mapping mechanism that allows
you to map almost any diagnostic to the output level that you want. The only
diagnostics that cannot be mapped are ``NOTE``\ s, which always follow the
severity of the previously emitted diagnostic and ``ERROR``\ s, which can only
@@ -116,18 +116,18 @@ example).
Diagnostic mappings are used in many ways. For example, if the user specifies
``-pedantic``, ``EXTENSION`` maps to ``Warning``, if they specify
``-pedantic-errors``, it turns into ``Error``. This is used to implement
-options like ``-Wunused_macros``, ``-Wundef`` etc.
+options like ``-Wunused_macros``, ``-Wundef``, etc.
Mapping to ``Fatal`` should only be used for diagnostics that are considered so
severe that error recovery won't be able to recover sensibly from them (thus
-spewing a ton of bogus errors). One example of this class of error are failure
+spewing a ton of bogus errors). One example of this class of error is failure
to ``#include`` a file.
Diagnostic Wording
^^^^^^^^^^^^^^^^^^
The wording used for a diagnostic is critical because it is the only way for a
user to know how to correct their code. Use the following suggestions when
-wording a diagnostic.
+wording a diagnostic:
* Diagnostics in Clang do not start with a capital letter and do not end with
punctuation.
@@ -162,7 +162,7 @@ wording a diagnostic.
cannot be null in well-defined C++ code``.
* Prefer diagnostic wording without contractions whenever possible. The single
quote in a contraction can be visually distracting due to its use with
- syntactic constructs and contractions can be harder to understand for non-
+ syntactic constructs, and contractions can be harder to understand for non-
native English speakers.
The Format String
@@ -195,14 +195,14 @@ the C++ code that :ref:`produces them <internals-producing-diag>`, and are
referenced by ``%0`` .. ``%9``. If you have more than 10 arguments to your
diagnostic, you are doing something wrong :). Unlike ``printf``, there is no
requirement that arguments to the diagnostic end up in the output in the same
-order as they are specified, you could have a format string with "``%1 %0``"
+order as they are specified; you could have a format string with "``%1 %0``"
that swaps them, for example. The text in between the percent and digit are
formatting instructions. If there are no instructions, the argument is just
turned into a string and substituted in.
Here are some "best practices" for writing the English format string:
-* Keep the string short. It should ideally fit in the 80 column limit of the
+* Keep the string short. It should ideally fit in the 80-column limit of the
``DiagnosticKinds.td`` file. This avoids the diagnostic wrapping when
printed, and forces you to think about the important point you are conveying
with the diagnostic.
@@ -227,7 +227,7 @@ used to achieve this sort of thing in a localizable way, see below.
Formatting a Diagnostic Argument
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Arguments to diagnostics are fully typed internally, and come from a couple
+Arguments to diagnostics are fully typed internally and come from a couple of
different classes: integers, types, names, and random strings. Depending on
the class of the argument, it can be optionally formatted in different ways.
This gives the ``DiagnosticConsumer`` information about what the argument means
@@ -268,7 +268,7 @@ Description:
This format specifier is used to merge multiple related diagnostics together
into one common one, without requiring the difference to be specified as an
English string argument. Instead of specifying the string, the diagnostic
- gets an integer argument and the format string selects the numbered option.
+ gets an integer argument, and the format string selects the numbered option.
In this case, the "``%0``" value must be an integer in the range [0..2]. If
it is 0, it prints "unary"; if it is 1, it prints "binary"; and if it is 2, it
prints "unary or binary". This allows other language translations to
@@ -287,7 +287,7 @@ Description:
additionally generates a namespace, enumeration, and enumerator list based on
the format string given. In the above case, a namespace is generated named
``FrobbleKind`` that has an unscoped enumeration with the enumerators
- ``VarDecl`` and ``FuncDecl`` which correspond to the values 0 and 1. This
+ ``VarDecl`` and ``FuncDecl``, which correspond to the values 0 and 1. This
permits a clearer use of the ``Diag`` in source code, as the above could be
called as: ``Diag(Loc, diag::frobble) << diag::FrobbleKind::VarDecl``.
@@ -407,7 +407,7 @@ Example:
def note_ovl_candidate : Note<
"candidate %sub{select_ovl_candidate}3,2,1 not viable">;
- and will act as if it was written
+ and will act as if it were written
``"candidate %select{function|constructor}3%select{| template| %1}2 not viable"``.
Description:
This format specifier is used to avoid repeating strings verbatim in multiple
@@ -447,7 +447,7 @@ For example, the binary expression error comes from code like this:
<< lex->getType() << rex->getType()
<< lex->getSourceRange() << rex->getSourceRange();
-This shows that use of the ``Diag`` method: it takes a location (a
+This shows the use of the ``Diag`` method: it takes a location (a
:ref:`SourceLocation <SourceLocation>` object) and a diagnostic enum value
(which matches the name from ``Diagnostic*Kinds.td``). If the diagnostic takes
arguments, they are specified with the ``<<`` operator: the first argument
@@ -586,7 +586,7 @@ Strangely enough, the ``SourceLocation`` class represents a location within the
source code of the program. Important design points include:
#. ``sizeof(SourceLocation)`` must be extremely small, as these are embedded
- into many AST nodes and are passed around often. Currently it is 32 bits.
+ into many AST nodes and are passed around often. Currently, it is 32 bits.
#. ``SourceLocation`` must be a simple value object that can be efficiently
copied.
#. We should be able to represent a source location for any byte of any input
@@ -605,7 +605,7 @@ In practice, the ``SourceLocation`` works together with the ``SourceManager``
class to encode two pieces of information about a location: its spelling
location and its expansion location. For most tokens, these will be the
same. However, for a macro expansion (or tokens that came from a ``_Pragma``
-directive) these will describe the location of the characters corresponding to
+directive), these will describe the location of the characters corresponding to
the token and the location where the token was used (i.e., the macro
expansion point or the location of the ``_Pragma`` itself).
@@ -621,7 +621,7 @@ token. This concept maps directly to the "spelling location" for the token.
.. mostly taken from https://discourse.llvm.org/t/code-ranges-of-tokens-ast-elements/16893/2
Clang represents most source ranges by [first, last], where "first" and "last"
-each point to the beginning of their respective tokens. For example consider
+each point to the beginning of their respective tokens. For example, consider
the ``SourceRange`` of the following statement:
.. code-block:: text
@@ -632,7 +632,7 @@ the ``SourceRange`` of the following statement:
To map from this representation to a character-based representation, the "last"
location needs to be adjusted to point to (or past) the end of that token with
either ``Lexer::MeasureTokenLength()`` or ``Lexer::getLocForEndOfToken()``. For
-the rare cases where character-level source ranges information is needed we use
+the rare cases where character-level source ranges information is needed, we use
the ``CharSourceRange`` class.
The Driver Library
@@ -651,17 +651,17 @@ The Frontend Library
====================
The Frontend library contains functionality useful for building tools on top of
-the Clang libraries, for example several methods for outputting diagnostics.
+the Clang libraries, including several methods for outputting diagnostics.
Compiler Invocation
-------------------
One of the classes provided by the Frontend library is ``CompilerInvocation``,
-which holds information that describe current invocation of the Clang ``-cc1``
+which holds information that describes the current invocation of the Clang ``-cc1``
frontend. The information typically comes from the command line constructed by
the Clang driver or from clients performing custom initialization. The data
structure is split into logical units used by different parts of the compiler,
-for example ``PreprocessorOptions``, ``LanguageOptions`` or ``CodeGenOptions``.
+for example, ``PreprocessorOptions``, ``LanguageOptions``, or ``CodeGenOptions``.
Command Line Interface
----------------------
@@ -698,7 +698,7 @@ Adding new Command Line Option
------------------------------
When adding a new command line option, the first place of interest is the header
-file declaring the corresponding options class (e.g. ``CodeGenOptions.h`` for
+file declaring the corresponding options class (e.g., ``CodeGenOptions.h`` for
command line option that affects the code generation). Create new member
variable for the option value:
@@ -739,7 +739,7 @@ The helper classes take a list of acceptable prefixes of the option (e.g.
Then, specify additional attributes via mix-ins:
* ``HelpText`` holds the text that will be printed besides the option name when
- the user requests help (e.g. via ``clang --help``).
+ the user requests help (e.g., via ``clang --help``).
* ``Group`` specifies the "category" of options this option belongs to. This is
used by various tools to categorize and sometimes filter options.
* ``Flags`` may contain "tags" associated with the option. These may affect how
@@ -779,7 +779,7 @@ use them to construct the ``-cc1`` job:
}
The last step is implementing the ``-cc1`` command line argument
-parsing/generation that initializes/serializes the option class (in our case
+parsing/generation that initializes/serializes the option class (in our case,
``CodeGenOptions``) stored within ``CompilerInvocation``. This can be done
automatically by using the marshalling annotations on the option definition:
@@ -946,13 +946,13 @@ described below. All of them take a key path argument and possibly other
information required for parsing or generating the command line argument.
**Note:** The marshalling infrastructure is not intended for driver-only
-options. Only options of the ``-cc1`` frontend need to be marshalled to/from
+options. Only options of the ``-cc1`` frontend need to be marshalled to/from a
``CompilerInvocation`` instance.
**Positive Flag**
The key path defaults to ``false`` and is set to ``true`` when the flag is
-present on command line.
+present on the command line.
.. code-block:: text
@@ -963,7 +963,7 @@ present on command line.
**Negative Flag**
The key path defaults to ``true`` and is set to ``false`` when the flag is
-present on command line.
+present on the command line.
.. code-block:: text
@@ -1041,7 +1041,7 @@ and the result is assigned to the key path on success.
The key path defaults to the value specified in ``MarshallingInfoEnum`` prefixed
by the contents of ``NormalizedValuesScope`` and ``::``. This ensures correct
-reference to an enum case is formed even if the enum resides in different
+reference to an enum case is formed even if the enum resides in a different
namespace or is an enum class. If the value present on the command line does not
match any of the comma-separated values from ``Values``, an error diagnostic is
issued. Otherwise, the corresponding element from ``NormalizedValues`` at the
@@ -1410,7 +1410,7 @@ or a clear engineering tradeoff -- should desugar minimally and wrap the result
in a construct representing the original source form.
For example, ``CXXForRangeStmt`` directly represents the syntactic form of a
-range-based for statement, but also holds a semantic representation of the
+range-based for statement but also holds a semantic representation of the
range declaration and iterator declarations. It does not contain a
fully-desugared ``ForStmt``, however.
@@ -1425,7 +1425,7 @@ with the same or similar semantics.
The ``Type`` class and its subclasses
-------------------------------------
-The ``Type`` class (and its subclasses) are an important part of the AST.
+The ``Type`` class (and its subclasses) is an important part of the AST.
Types are accessed through the ``ASTContext`` class, which implicitly creates
and uniques them as they are needed. Types have a couple of non-obvious
features: 1) they do not capture type qualifiers like ``const`` or ``volatile``
@@ -1474,7 +1474,7 @@ various operators (for example, the type of ``*Y`` is "``foo``", not
is an instance of the ``TypedefType`` class, which indicates that the type of
these expressions is a typedef for "``foo``".
-Representing types like this is great for diagnostics, because the
+Representing types like this is great for diagnostics because the
user-specified type is always immediately available. There are two problems
with this: first, various semantic checks need to make judgements about the
*actual structure* of a type, ignoring typedefs. Second, we need an efficient
@@ -1521,7 +1521,7 @@ know it exists. To continue the example, the result type of the indirection
operator is the pointee type of the subexpression. In order to determine the
type, we need to get the instance of ``PointerType`` that best captures the
typedef information in the program. If the type of the expression is literally
-a ``PointerType``, we can return that, otherwise we have to dig through the
+a ``PointerType``, we can return that; otherwise, we have to dig through the
typedefs to find the pointer type. For example, if the subexpression had type
"``foo*``", we could return that type as the result. If the subexpression had
type "``bar``", we want to return "``foo*``" (note that we do *not* want
@@ -1552,7 +1552,7 @@ that sets a bit), and remove one or more type qualifiers (just return a
``QualType`` with the bitfield set to empty).
Further, because the bits are stored outside of the type itself, we do not need
-to create duplicates of types with different sets of qualifiers (i.e. there is
+to create duplicates of types with different sets of qualifiers (i.e., there is
only a single heap allocated "``int``" type: "``const int``" and "``volatile
const int``" both point to the same heap allocated "``int``" type). This
reduces the heap size used to represent bits and also means we do not have to
@@ -1972,7 +1972,7 @@ and optimize code for it, but it's used as parsing continues to detect further
errors in the input. Clang-based tools also depend on such ASTs, and IDEs in
particular benefit from a high-quality AST for broken code.
-In presence of errors, clang uses a few error-recovery strategies to present the
+In the presence of errors, clang uses a few error-recovery strategies to present the
broken code in the AST:
- correcting errors: in cases where clang is confident about the fix, it
@@ -1981,7 +1981,7 @@ broken code in the AST:
provide more accurate subsequent diagnostics. Typo correction is a typical
example.
- representing invalid node: the invalid node is preserved in the AST in some
- form, e.g. when the "declaration" part of the declaration contains semantic
+ form, e.g., when the "declaration" part of the declaration contains semantic
errors, the Decl node is marked as invalid.
- dropping invalid node: this often happens for errors that we don’t have
graceful recovery. Prior to Recovery AST, a mismatched-argument function call
@@ -1994,9 +1994,9 @@ for broken code.
Recovery AST
^^^^^^^^^^^^
-The idea of Recovery AST is to use recovery nodes which act as a placeholder to
+The idea of Recovery AST is to use recovery nodes, which act as a placeholder to
maintain the rough structure of the parsing tree, preserve locations and
-children but have no language semantics attached to them.
+children, but have no language semantics attached to them.
For example, consider the following mismatched function call:
@@ -2031,10 +2031,10 @@ With Recovery AST, the AST looks like:
`-DeclRefExpr <col:9> 'int' lvalue ParmVar 'abc' 'int'
-An alternative is to use existing Exprs, e.g. CallExpr for the above example.
-This would capture more call details (e.g. locations of parentheses) and allow
+An alternative is to use existing Exprs, e.g., CallExpr for the above example.
+This would capture more call details (e.g., locations of parentheses) and allow
it to be treated uniformly with valid CallExprs. However, jamming the data we
-have into CallExpr forces us to weaken its invariants, e.g. arg count may be
+have into CallExpr forces us to weaken its invariants, e.g., arg count may be
wrong. This would introduce a huge burden on consumers of the AST to handle such
"impossible" cases. So when we're representing (rather than correcting) errors,
we use a distinct recovery node type with extremely weak invariants instead.
@@ -2048,7 +2048,7 @@ Types and dependence
^^^^^^^^^^^^^^^^^^^^
``RecoveryExpr`` is an ``Expr``, so it must have a type. In many cases the true
-type can't really be known until the code is corrected (e.g. a call to a
+type can't really be known until the code is corrected (e.g., a call to a
function that doesn't exist). And it means that we can't properly perform type
checks on some containing constructs, such as ``return 42 + unknownFunction()``.
@@ -2058,7 +2058,7 @@ mean dependence on a template parameter or how an error is repaired. The
``DependentTy``, and this suppresses type-based analysis in the same way it
would inside a template.
-In cases where we are confident about the concrete type (e.g. the return type
+In cases where we are confident about the concrete type (e.g., the return type
for a broken non-overloaded function call), the ``RecoveryExpr`` will have this
type. This allows more code to be typechecked, and produces a better AST and
more diagnostics. For example:
@@ -2071,7 +2071,7 @@ more diagnostics. For example:
Whether or not the ``RecoveryExpr`` has a dependent type, it is always
considered value-dependent, because its value isn't well-defined until the error
is resolved. Among other things, this means that clang doesn't emit more errors
-where a RecoveryExpr is used as a constant (e.g. array size), but also won't try
+where a RecoveryExpr is used as a constant (e.g., array size), but also won't try
to evaluate it.
ContainsErrors bit
@@ -2122,7 +2122,7 @@ cycles. One example of a cycle is the connection between a
``ClassTemplateDecl`` and its "templated" ``CXXRecordDecl``. The *templated*
``CXXRecordDecl`` represents all the fields and methods inside the class
template, while the ``ClassTemplateDecl`` holds the information which is
-related to being a template, i.e. template arguments, etc. We can get the
+related to being a template, i.e., template arguments, etc. We can get the
*templated* class (the ``CXXRecordDecl``) of a ``ClassTemplateDecl`` with
``ClassTemplateDecl::getTemplatedDecl()``. And we can get back a pointer of the
"described" class template from the *templated* class:
@@ -2145,7 +2145,7 @@ we skip the copy.
The informal definition of structural equivalency is the following:
Two nodes are **structurally equivalent** if they are
-- builtin types and refer to the same type, e.g. ``int`` and ``int`` are
+- builtin types and refer to the same type, e.g., ``int`` and ``int`` are
structurally equivalent,
- function types and all their parameters have structurally equivalent types,
- record types and all their fields in order of their definition have the same
@@ -2162,7 +2162,7 @@ mentioned properties, we have to check for equivalent template
parameters/arguments, etc.
The structural equivalence check can be and is used independently from the
-ASTImporter, e.g. the ``clang::Sema`` class uses it also.
+ASTImporter, e.g., the ``clang::Sema`` class also uses it.
The equivalence of nodes may depend on the equivalency of other pairs of nodes.
Thus, the check is implemented as a parallel graph traversal. We traverse
@@ -2195,7 +2195,7 @@ Redeclaration Chains
^^^^^^^^^^^^^^^^^^^^
The early version of the ``ASTImporter``'s merge mechanism squashed the
-declarations, i.e. it aimed to have only one declaration instead of maintaining
+declarations, i.e., it aimed to have only one declaration instead of maintaining
a whole redeclaration chain. This early approach simply skipped importing a
function prototype, but it imported a definition. To demonstrate the problem
with this approach, let's consider an empty "to" context and the following
@@ -2225,7 +2225,7 @@ another definition, we will use the existing definition. However, we can import
prototype(s): we chain the newly imported prototype(s) to the existing
definition. Whenever we import a new prototype from a third context, that will
be added to the end of the redeclaration chain. This may result in long
-redeclaration chains in certain cases, e.g. if we import from several
+redeclaration chains in certain cases, e.g., if we import from several
translation units which include the same header with the prototype.
.. Squashing prototypes
@@ -2290,7 +2290,7 @@ Traversal during the Import
^^^^^^^^^^^^^^^^^^^^^^^^^^^
The node specific import mechanisms are implemented in
-``ASTNodeImporter::VisitNode()`` functions, e.g. ``VisitFunctionDecl()``.
+``ASTNodeImporter::VisitNode()`` functions, e.g., ``VisitFunctionDecl()``.
When we import a declaration, we first import everything that is needed to
call the constructor of that declaration node. Everything which can be set
later is set after the node is created. For example, in case of a
@@ -2490,7 +2490,7 @@ In case of LLDB, an implementation of the ``ExternalASTSource`` interface is
attached to the AST context which is related to the parsed expression. This
implementation of the ``ExternalASTSource`` interface is realized with the help
of the ``ASTImporter`` class. This way, LLDB can reuse Clang's parsing
-machinery while synthesizing the underlying AST from the debug data (e.g. from
+machinery while synthesizing the underlying AST from the debug data (e.g., from
DWARF). From the view of the ``ASTImporter`` this means both the "to" and the
"from" context may have declaration contexts with external lexical storage. If
a ``DeclContext`` in the "to" AST context has external lexical storage then we
@@ -2573,7 +2573,7 @@ conflict error (ODR violation in C++). In this case, we return with an
clients of the ``ASTImporter`` may require a different, perhaps less
conservative and more liberal error handling strategy.
-E.g. static analysis clients may benefit if the node is created even if there
+E.g., static analysis clients may benefit if the node is created even if there
is a name conflict. During the CTU analysis of certain projects, we recognized
that there are global declarations which collide with declarations from other
translation units, but they are not referenced outside of their translation
@@ -2916,7 +2916,7 @@ Any error during satisfaction is recorded in ``ConstraintSatisfaction``.
for nested requirements, ``ConstraintSatisfaction`` is stored (including
diagnostics) in the AST, which is something we might want to improve.
-When an atomic constraint is not satified, we try to substitute into any
+When an atomic constraint is not satisfied, we try to substitute into any
enclosing concept-id using the same mechanism described above, for
diagnostic purposes, and inject that into the ``ConstraintSatisfaction``.
@@ -3584,7 +3584,7 @@ be specified by appending a ``+`` to the number. For example:
void f(); // expected-note 0+ {{previous declaration is here}}
void g(); // expected-note 1+ {{previous declaration is here}}
-In the first example, the diagnostic becomes optional, i.e. it will be
+In the first example, the diagnostic becomes optional, i.e., it will be
swallowed if it occurs, but will not generate an error if it does not occur. In
the second example, the diagnostic must occur at least once. As a short-hand,
"one or more" can be specified simply by ``+``. For example:
diff --git a/clang/docs/OpenMPSupport.rst b/clang/docs/OpenMPSupport.rst
index 5c73e24..c75c170 100644
--- a/clang/docs/OpenMPSupport.rst
+++ b/clang/docs/OpenMPSupport.rst
@@ -193,7 +193,7 @@ implementation.
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
| device | support non-contiguous array sections for target update | :good:`done` | https://github.com/llvm/llvm-project/pull/144635 |
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | pointer attachment | :good:`done` | |
+| device | pointer attachment | :part:`being repaired` | @abhinavgaba (https://github.com/llvm/llvm-project/pull/153683) |
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
| atomic | hints for the atomic construct | :good:`done` | D51233 |
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
@@ -627,6 +627,10 @@ implementation.
+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
| loop grid/tile modifiers for sizes clause | :none:`unclaimed` | :none:`unclaimed` | |
+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| attach map-type modifier | :part:`In Progress` | :none:`unclaimed` | C/C++: @abhinavgaba; |
+| | | | RT: @abhinavgaba (https://github.com/llvm/llvm-project/pull/149036, |
+| | | | https://github.com/llvm/llvm-project/pull/158370) |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
OpenMP Extensions
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index 4f62a67..fe77f91 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -128,6 +128,17 @@ AST Dumping Potentially Breaking Changes
- Default arguments of template template parameters are pretty-printed now.
+- ``asm`` attributes are now always pretty-printed as the first attribute on
+  the right side of the declaration. Before, we had, e.g.:
+
+ ``__attribute__(("visibility")) asm("string")``
+
+ Now we have:
+
+ ``asm("string") __attribute__(("visibility"))``
+
+  This form is accepted by both the clang and gcc parsers.
+
Clang Frontend Potentially Breaking Changes
-------------------------------------------
- Members of anonymous unions/structs are now injected as ``IndirectFieldDecl``
@@ -271,6 +282,8 @@ Non-comprehensive list of changes in this release
allocation functions with a token ID can be enabled via the
``-fsanitize=alloc-token`` flag.
+- Clang now rejects the invalid use of ``constexpr`` with ``auto`` and an explicit type in C. (#GH163090)
+
New Compiler Flags
------------------
- New option ``-fno-sanitize-debug-trap-reasons`` added to disable emitting trap reasons into the debug info when compiling with trapping UBSan (e.g. ``-fsanitize-trap=undefined``).
@@ -476,6 +489,7 @@ Bug Fixes to C++ Support
- Fix a crash when attempting to deduce a deduction guide from a non deducible template template parameter. (#130604)
- Fix for clang incorrectly rejecting the default construction of a union with
nontrivial member when another member has an initializer. (#GH81774)
+- Fixed a template depth issue when parsing lambdas inside a type constraint. (#GH162092)
- Diagnose unresolved overload sets in non-dependent compound requirements. (#GH51246) (#GH97753)
Bug Fixes to AST Handling
diff --git a/clang/include/clang/AST/ASTContext.h b/clang/include/clang/AST/ASTContext.h
index 78220d4..33aa2d3 100644
--- a/clang/include/clang/AST/ASTContext.h
+++ b/clang/include/clang/AST/ASTContext.h
@@ -2874,11 +2874,11 @@ public:
/// returned type is guaranteed to be free of any of these, allowing two
/// canonical types to be compared for exact equality with a simple pointer
/// comparison.
- CanQualType getCanonicalType(QualType T) const {
+ static CanQualType getCanonicalType(QualType T) {
return CanQualType::CreateUnsafe(T.getCanonicalType());
}
- const Type *getCanonicalType(const Type *T) const {
+ static const Type *getCanonicalType(const Type *T) {
return T->getCanonicalTypeInternal().getTypePtr();
}
@@ -2890,10 +2890,10 @@ public:
CanQualType getCanonicalParamType(QualType T) const;
/// Determine whether the given types \p T1 and \p T2 are equivalent.
- bool hasSameType(QualType T1, QualType T2) const {
+ static bool hasSameType(QualType T1, QualType T2) {
return getCanonicalType(T1) == getCanonicalType(T2);
}
- bool hasSameType(const Type *T1, const Type *T2) const {
+ static bool hasSameType(const Type *T1, const Type *T2) {
return getCanonicalType(T1) == getCanonicalType(T2);
}
@@ -2921,7 +2921,7 @@ public:
/// Determine whether the given types are equivalent after
/// cvr-qualifiers have been removed.
- bool hasSameUnqualifiedType(QualType T1, QualType T2) const {
+ static bool hasSameUnqualifiedType(QualType T1, QualType T2) {
return getCanonicalType(T1).getTypePtr() ==
getCanonicalType(T2).getTypePtr();
}
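
A minimal sketch of what making these overloads static enables: canonical-type
comparisons no longer need an ``ASTContext`` object in hand. The free function below is
illustrative, not part of the patch.

    #include "clang/AST/ASTContext.h"

    // Compare two types after canonicalization without threading an
    // ASTContext reference through the call site.
    static bool sameCanonicalType(clang::QualType A, clang::QualType B) {
      return clang::ASTContext::hasSameType(A, B); // now callable as a static
    }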
diff --git a/clang/include/clang/Basic/Attr.td b/clang/include/clang/Basic/Attr.td
index 22e60aa..eb48a0c 100644
--- a/clang/include/clang/Basic/Attr.td
+++ b/clang/include/clang/Basic/Attr.td
@@ -1572,6 +1572,23 @@ def HIPManaged : InheritableAttr {
let Documentation = [HIPManagedAttrDocs];
}
+def CUDAClusterDims : InheritableAttr {
+ let Spellings = [GNU<"cluster_dims">];
+ let Args = [ExprArgument<"X">, ExprArgument<"Y", /*opt=*/1>, ExprArgument<"Z", /*opt=*/1>];
+ let Subjects = SubjectList<[ObjCMethod, FunctionLike]>;
+ let LangOpts = [CUDA];
+ let Documentation = [CUDAClusterDimsAttrDoc];
+}
+
+def CUDANoCluster : InheritableAttr {
+ let Spellings = [GNU<"no_cluster">];
+ let Subjects = SubjectList<[ObjCMethod, FunctionLike]>;
+ let LangOpts = [CUDA];
+ let Documentation = [CUDANoClusterAttrDoc];
+}
+
+def : MutualExclusions<[CUDAClusterDims, CUDANoCluster]>;
+
def CUDAInvalidTarget : InheritableAttr {
let Spellings = [];
let Subjects = SubjectList<[Function]>;
diff --git a/clang/include/clang/Basic/AttrDocs.td b/clang/include/clang/Basic/AttrDocs.td
index e0bbda0..2fdd041 100644
--- a/clang/include/clang/Basic/AttrDocs.td
+++ b/clang/include/clang/Basic/AttrDocs.td
@@ -7545,6 +7545,45 @@ A managed variable can be accessed in both device and host code.
}];
}
+def CUDAClusterDimsAttrDoc : Documentation {
+ let Category = DocCatDecl;
+ let Content = [{
+In CUDA/HIP programming, the ``cluster_dims`` attribute, conventionally exposed as the
+``__cluster_dims__`` macro, can be applied to a kernel function to set the dimensions of a
+thread block cluster, an optional level of the thread hierarchy made up of thread blocks.
+``__cluster_dims__`` defines the cluster size as ``(X, Y, Z)``, where each value is the number
+of thread blocks in that dimension. The ``cluster_dims`` and ``no_cluster`` attributes are
+mutually exclusive.
+
+.. code::
+
+ __global__ __cluster_dims__(2, 1, 1) void kernel(...) {
+ ...
+ }
+
+ }];
+}
+
+def CUDANoClusterAttrDoc : Documentation {
+ let Category = DocCatDecl;
+ let Content = [{
+In CUDA/HIP programming, a kernel function can still be launched with the cluster feature enabled
+at runtime, even without being annotated with ``__cluster_dims__``. The LLVM/Clang-exclusive
+``no_cluster`` attribute, conventionally exposed as the ``__no_cluster__`` macro, can be applied to
+a kernel function to explicitly indicate that the cluster feature will not be enabled either at
+compile time or at kernel launch time. This allows the compiler to apply certain optimizations
+without assuming that clustering could be enabled at runtime. It is undefined behavior to launch a
+kernel annotated with ``__no_cluster__`` if the cluster feature is enabled at runtime.
+The ``cluster_dims`` and ``no_cluster`` attributes are mutually exclusive.
+
+.. code::
+
+ __global__ __no_cluster__ void kernel(...) {
+ ...
+ }
+ }];
+}
+
def LifetimeOwnerDocs : Documentation {
let Category = DocCatDecl;
let Content = [{
diff --git a/clang/include/clang/Basic/Builtins.td b/clang/include/clang/Basic/Builtins.td
index 792e2e0..a350acd 100644
--- a/clang/include/clang/Basic/Builtins.td
+++ b/clang/include/clang/Basic/Builtins.td
@@ -4957,6 +4957,18 @@ def HLSLResourceNonUniformIndex : LangBuiltin<"HLSL_LANG"> {
let Prototype = "uint32_t(uint32_t)";
}
+def HLSLResourceGetDimensionsX : LangBuiltin<"HLSL_LANG"> {
+ let Spellings = ["__builtin_hlsl_resource_getdimensions_x"];
+ let Attributes = [NoThrow];
+ let Prototype = "void(...)";
+}
+
+def HLSLResourceGetStride : LangBuiltin<"HLSL_LANG"> {
+ let Spellings = ["__builtin_hlsl_resource_getstride"];
+ let Attributes = [NoThrow];
+ let Prototype = "void(...)";
+}
+
def HLSLAll : LangBuiltin<"HLSL_LANG"> {
let Spellings = ["__builtin_hlsl_all"];
let Attributes = [NoThrow, Const];
diff --git a/clang/include/clang/Basic/BuiltinsX86.td b/clang/include/clang/Basic/BuiltinsX86.td
index 62c70fba..d03c778 100644
--- a/clang/include/clang/Basic/BuiltinsX86.td
+++ b/clang/include/clang/Basic/BuiltinsX86.td
@@ -124,13 +124,13 @@ let Attributes = [Const, NoThrow, RequiredVectorWidth<128>] in {
}
let Features = "ssse3" in {
- def pmulhrsw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">;
def psignb128 : X86Builtin<"_Vector<16, char>(_Vector<16, char>, _Vector<16, char>)">;
def psignw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">;
def psignd128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>)">;
}
let Features = "ssse3", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
+ def pmulhrsw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">;
def pmaddubsw128 : X86Builtin<"_Vector<8, short>(_Vector<16, char>, _Vector<16, char>)">;
def pshufb128 : X86Builtin<"_Vector<16, char>(_Vector<16, char>, _Vector<16, char>)">;
}
@@ -608,7 +608,6 @@ let Features = "avx2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] i
def palignr256 : X86Builtin<"_Vector<32, char>(_Vector<32, char>, _Vector<32, char>, _Constant int)">;
def pmovmskb256 : X86Builtin<"int(_Vector<32, char>)">;
- def pmulhrsw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>)">;
def psadbw256 : X86Builtin<"_Vector<4, long long int>(_Vector<32, char>, _Vector<32, char>)">;
def psignb256 : X86Builtin<"_Vector<32, char>(_Vector<32, char>, _Vector<32, char>)">;
def psignw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>)">;
@@ -661,6 +660,7 @@ let Features = "avx2", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWi
def psrawi256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, int)">;
def psradi256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, int)">;
+ def pmulhrsw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>)">;
def pmulhuw256 : X86Builtin<"_Vector<16, unsigned short>(_Vector<16, unsigned short>, _Vector<16, unsigned short>)">;
def pmulhw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>)">;
@@ -1386,13 +1386,10 @@ let Features = "avx512bitalg", Attributes = [NoThrow, Const, RequiredVectorWidth
def vpshufbitqmb512_mask : X86Builtin<"unsigned long long int(_Vector<64, char>, _Vector<64, char>, unsigned long long int)">;
}
-let Features = "avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
- def pmulhrsw512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Vector<32, short>)">;
-}
-
let Features = "avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def pavgb512 : X86Builtin<"_Vector<64, unsigned char>(_Vector<64, unsigned char>, _Vector<64, unsigned char>)">;
def pavgw512 : X86Builtin<"_Vector<32, unsigned short>(_Vector<32, unsigned short>, _Vector<32, unsigned short>)">;
+ def pmulhrsw512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Vector<32, short>)">;
def pmulhuw512 : X86Builtin<"_Vector<32, unsigned short>(_Vector<32, unsigned short>, _Vector<32, unsigned short>)">;
def pmulhw512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Vector<32, short>)">;
}
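
Moving the ``pmulhrsw`` builtins into the ``Constexpr`` group lets calls to them fold in
constant expressions. A rough sketch of what that permits, using clang's vector extension
and assuming ``-mssse3`` (the folding itself is implemented by the interpreter change
further down):

    typedef short v8hi __attribute__((vector_size(16)));

    constexpr v8hi a = {0x4000, 0, 0, 0, 0, 0, 0, 0}; // 0.5 in Q15
    constexpr v8hi b = {0x4000, 0, 0, 0, 0, 0, 0, 0}; // 0.5 in Q15
    constexpr v8hi r = __builtin_ia32_pmulhrsw128(a, b);
    static_assert(r[0] == 0x2000, "0.5 * 0.5 rounds to 0.25 in Q15");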
diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
index 12fd7b08..22de85d 100644
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -13070,6 +13070,12 @@ def warn_cuda_maxclusterrank_sm_90 : Warning<
"maxclusterrank requires sm_90 or higher, CUDA arch provided: %0, ignoring "
"%1 attribute">, InGroup<IgnoredAttributes>;
+def err_cluster_attr_not_supported : Error<
+ "%0 is not supported for this GPU architecture">;
+
+def err_cluster_dims_too_large : Error<
+ "cluster does not support more than %0 thread blocks; %1 provided">;
+
// VTable pointer authentication errors
def err_non_polymorphic_vtable_pointer_auth : Error<
"cannot set vtable pointer authentication on monomorphic type %0">;
diff --git a/clang/include/clang/Basic/arm_mve.td b/clang/include/clang/Basic/arm_mve.td
index 412ef9a..2e5e1d9 100644
--- a/clang/include/clang/Basic/arm_mve.td
+++ b/clang/include/clang/Basic/arm_mve.td
@@ -831,9 +831,8 @@ multiclass contiguous_load<string mnemonic, PrimitiveType memtype,
NameOverride<mnemonic>;
def: Intrinsic<Vector, (args CPtr<CopyKind<same_size[0], Scalar>>:$addr,
Predicate:$pred),
- (IRIntBase<"masked_load", [Vector, CPtr<Vector>]>
- (CPtr<Vector> $addr), !srl(memtype.size,3),
- $pred, (zeroinit Vector))>,
+ (masked_load Vector, (CPtr<Vector> $addr),
+ !srl(memtype.size,3), $pred, (zeroinit Vector))>,
NameOverride<mnemonic # "_z">;
}
@@ -846,9 +845,8 @@ multiclass contiguous_load<string mnemonic, PrimitiveType memtype,
NameOverride<"vld1q">;
def: Intrinsic<Vector, (args CPtr<CopyKind<same_size[0], Scalar>>:$addr,
Predicate:$pred),
- (IRIntBase<"masked_load", [Vector, CPtr<Vector>]>
- (CPtr<Vector> $addr), !srl(memtype.size,3),
- $pred, (zeroinit Vector))>,
+ (masked_load Vector, (CPtr<Vector> $addr),
+ !srl(memtype.size,3), $pred, (zeroinit Vector))>,
NameOverride<"vld1q_z">;
}
@@ -863,9 +861,7 @@ multiclass contiguous_load<string mnemonic, PrimitiveType memtype,
NameOverride<mnemonic>;
def: Intrinsic<Vector, (args CPtr<CopyKind<same_size[0], Scalar>>:$addr,
Predicate:$pred),
- (extend (IRIntBase<"masked_load",
- [NarrowedVecOf<memtype,Vector>,
- CPtr<NarrowedVecOf<memtype,Vector>>]>
+ (extend (masked_load NarrowedVecOf<memtype,Vector>,
(CPtr<NarrowedVecOf<memtype,Vector>> $addr),
!srl(memtype.size,3), $pred,
(zeroinit NarrowedVecOf<memtype,Vector>)),
@@ -890,8 +886,7 @@ multiclass contiguous_store<string mnemonic, PrimitiveType memtype,
NameOverride<mnemonic>;
def: Intrinsic<Void, (args Ptr<CopyKind<same_size[0], Scalar>>:$addr,
Vector:$value, Predicate:$pred),
- (IRIntBase<"masked_store", [Vector, Ptr<Vector>]>
- $value, (Ptr<Vector> $addr),
+ (masked_store $value, (Ptr<Vector> $addr),
!srl(memtype.size,3), $pred)>,
NameOverride<mnemonic # "_p">;
}
@@ -907,8 +902,7 @@ multiclass contiguous_store<string mnemonic, PrimitiveType memtype,
NameOverride<"vst1q">;
def: Intrinsic<Void, (args Ptr<CopyKind<same_size[0], Scalar>>:$addr,
Vector:$value, Predicate:$pred),
- (IRIntBase<"masked_store", [Vector, Ptr<Vector>]>
- $value, (Ptr<Vector> $addr),
+ (masked_store $value, (Ptr<Vector> $addr),
!srl(memtype.size,3), $pred)>,
NameOverride<"vst1q_p">;
}
@@ -925,9 +919,7 @@ multiclass contiguous_store<string mnemonic, PrimitiveType memtype,
NameOverride<mnemonic>;
def: Intrinsic<Void, (args Ptr<CopyKind<same_size[0], Scalar>>:$addr,
Vector:$value, Predicate:$pred),
- (IRIntBase<"masked_store",
- [NarrowedVecOf<memtype,Vector>,
- Ptr<NarrowedVecOf<memtype,Vector>>]>
+ (masked_store
(trunc $value, NarrowedVecOf<memtype,Vector>),
(Ptr<NarrowedVecOf<memtype,Vector>> $addr),
!srl(memtype.size,3), $pred)>,
diff --git a/clang/include/clang/Basic/arm_mve_defs.td b/clang/include/clang/Basic/arm_mve_defs.td
index 083d03a..c1562a0 100644
--- a/clang/include/clang/Basic/arm_mve_defs.td
+++ b/clang/include/clang/Basic/arm_mve_defs.td
@@ -134,6 +134,13 @@ def unzip: CGHelperFn<"VectorUnzip"> {
}
def zip: CGHelperFn<"VectorZip">;
+def masked_load: IRBuilder<"CreateMaskedLoad"> {
+ let special_params = [IRBuilderIntParam<2, "Align">];
+}
+def masked_store: IRBuilder<"CreateMaskedStore"> {
+ let special_params = [IRBuilderIntParam<2, "Align">];
+}
+
// Trivial 'codegen' function that just returns its argument. Useful
// for wrapping up a variable name like $foo into a thing you can pass
// around as type 'dag'.
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 07a8724..96d8300 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -1013,9 +1013,9 @@ let ManualCodegen = [{
}] in {
let HasFRMRoundModeOp = true in {
// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
- defm vfadd : RVVFloatingBinBuiltinSetRoundingMode;
- defm vfsub : RVVFloatingBinBuiltinSetRoundingMode;
- defm vfrsub : RVVFloatingBinVFBuiltinSetRoundingMode;
+ defm vfadd : RVVFloatingBinBuiltinSetRoundingMode<HasBF=1>;
+ defm vfsub : RVVFloatingBinBuiltinSetRoundingMode<HasBF=1>;
+ defm vfrsub : RVVFloatingBinVFBuiltinSetRoundingMode<HasBF=1>;
// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
// Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW
@@ -1023,14 +1023,14 @@ let ManualCodegen = [{
defm vfwsub : RVVFloatingWidenOp0BinBuiltinSetRoundingMode;
// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
- defm vfmul : RVVFloatingBinBuiltinSetRoundingMode;
+ defm vfmul : RVVFloatingBinBuiltinSetRoundingMode<HasBF=1>;
defm vfdiv : RVVFloatingBinBuiltinSetRoundingMode;
defm vfrdiv : RVVFloatingBinVFBuiltinSetRoundingMode;
}
// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
- defm vfadd : RVVFloatingBinBuiltinSet;
- defm vfsub : RVVFloatingBinBuiltinSet;
- defm vfrsub : RVVFloatingBinVFBuiltinSet;
+ defm vfadd : RVVFloatingBinBuiltinSet<HasBF=1>;
+ defm vfsub : RVVFloatingBinBuiltinSet<HasBF=1>;
+ defm vfrsub : RVVFloatingBinVFBuiltinSet<HasBF=1>;
// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
// Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW
@@ -1038,7 +1038,7 @@ let ManualCodegen = [{
defm vfwsub : RVVFloatingWidenOp0BinBuiltinSet;
// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
- defm vfmul : RVVFloatingBinBuiltinSet;
+ defm vfmul : RVVFloatingBinBuiltinSet<HasBF=1>;
defm vfdiv : RVVFloatingBinBuiltinSet;
defm vfrdiv : RVVFloatingBinVFBuiltinSet;
}
@@ -1065,6 +1065,10 @@ let ManualCodegen = [{
defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "x",
[["vv", "w", "wvvu"],
["vf", "w", "wveu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "y",
+ [["vv", "vw", "wvvu"],
+ ["vf", "vw", "wveu"]]>;
}
}
// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
@@ -1081,6 +1085,10 @@ let ManualCodegen = [{
defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "x",
[["vv", "w", "wvv"],
["vf", "w", "wve"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "y",
+ [["vv", "vw", "wvv"],
+ ["vf", "vw", "wve"]]>;
}
}
}
@@ -1170,6 +1178,8 @@ let ManualCodegen = [{
defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "fd", [["v", "v", "vvu"]]>;
let RequiredFeatures = ["zvfh"] in
defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "x", [["v", "v", "vvu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "y", [["v", "v", "vvu"]]>;
}
// 13.8. Vector Floating-Point Square-Root Instruction
defm vfsqrt : RVVOutBuiltinSet<"vfsqrt", "fd", [["v", "v", "vv"]]>;
@@ -1180,21 +1190,26 @@ let ManualCodegen = [{
defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "fd", [["v", "v", "vv"]]>;
let RequiredFeatures = ["zvfh"] in
defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "x", [["v", "v", "vv"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "y", [["v", "v", "vv"]]>;
}
// 13.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
defm vfrsqrt7 : RVVOutBuiltinSet<"vfrsqrt7", "fd", [["v", "v", "vv"]]>;
let RequiredFeatures = ["zvfh"] in
defm vfrsqrt7 : RVVOutBuiltinSet<"vfrsqrt7", "x", [["v", "v", "vv"]]>;
+let RequiredFeatures = ["zvfbfa"] in
+ defm vfrsqrt7 : RVVOutBuiltinSet<"vfrsqrt7", "y", [["v", "v", "vv"]]>;
+
// 13.11. Vector Floating-Point MIN/MAX Instructions
-defm vfmin : RVVFloatingBinBuiltinSet;
-defm vfmax : RVVFloatingBinBuiltinSet;
+defm vfmin : RVVFloatingBinBuiltinSet<HasBF=1>;
+defm vfmax : RVVFloatingBinBuiltinSet<HasBF=1>;
// 13.12. Vector Floating-Point Sign-Injection Instructions
-defm vfsgnj : RVVFloatingBinBuiltinSet;
-defm vfsgnjn : RVVFloatingBinBuiltinSet;
-defm vfsgnjx : RVVFloatingBinBuiltinSet;
+defm vfsgnj : RVVFloatingBinBuiltinSet<HasBF=1>;
+defm vfsgnjn : RVVFloatingBinBuiltinSet<HasBF=1>;
+defm vfsgnjx : RVVFloatingBinBuiltinSet<HasBF=1>;
}
defm vfneg_v : RVVPseudoVFUnaryBuiltin<"vfsgnjn", "fd">;
let RequiredFeatures = ["zvfh"] in
@@ -1219,6 +1234,8 @@ let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vfclass : RVVOp0BuiltinSet<"vfclass", "fd", [["v", "Uv", "Uvv"]]>;
let RequiredFeatures = ["zvfh"] in
defm vfclass : RVVOp0BuiltinSet<"vfclass", "x", [["v", "Uv", "Uvv"]]>;
+let RequiredFeatures = ["zvfbfa"] in
+ defm vfclass : RVVOp0BuiltinSet<"vfclass", "y", [["v", "vUv", "Uvv"]]>;
}
// 13.15. Vector Floating-Point Merge Instruction
@@ -1239,6 +1256,9 @@ let HasMasked = false,
let RequiredFeatures = ["zvfh"] in
defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "x",
[["vfm", "v", "vvem"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "y",
+ [["vfm", "v", "vvem"]]>;
}
// 13.16. Vector Floating-Point Move Instruction
@@ -1252,6 +1272,9 @@ let HasMasked = false,
let RequiredFeatures = ["zvfh"] in
defm vfmv_v : RVVOutBuiltinSet<"vfmv_v_f", "x",
[["f", "v", "ve"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfmv_v : RVVOutBuiltinSet<"vfmv_v_f", "y",
+ [["f", "v", "ve"]]>;
}
// 13.17. Single-Width Floating-Point/Integer Type-Convert Instructions
@@ -1287,10 +1310,16 @@ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
defm : RVVConvBuiltinSet<"vfwcvt_f_x_v", "c", [["Fw", "Fwv"]]>;
}
}
+ let RequiredFeatures = ["zvfbfa"], OverloadedName = "vfwcvt_f_bf16" in {
+ defm : RVVConvBuiltinSet<"vfwcvt_f_xu_v", "c", [["Yw", "YwUv"]]>;
+ defm : RVVConvBuiltinSet<"vfwcvt_f_x_v", "c", [["Yw", "Ywv"]]>;
+ }
let OverloadedName = "vfwcvt_f" in {
defm : RVVConvBuiltinSet<"vfwcvt_f_f_v", "f", [["w", "wv"]]>;
let RequiredFeatures = ["zvfhmin"] in
defm : RVVConvBuiltinSet<"vfwcvt_f_f_v", "x", [["w", "wv"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfwcvt_f_f_v", "y", [["vw", "wv"]]>;
}
}
@@ -1300,17 +1329,23 @@ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
defm : RVVConvBuiltinSet<"vfncvt_rtz_xu_f_w", "si", [["Uv", "UvFw"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_rtz_xu_f_w", "c", [["Uv", "UvFw"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfncvt_rtz_xu_f_w", "c", [["YwUv", "UvYw"]]>;
}
let OverloadedName = "vfncvt_rtz_x" in {
defm : RVVConvBuiltinSet<"vfncvt_rtz_x_f_w", "si", [["Iv", "IvFw"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_rtz_x_f_w", "c", [["Iv", "IvFw"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfncvt_rtz_x_f_w", "c", [["YwIv", "IvYw"]]>;
}
let OverloadedName = "vfncvt_rod_f" in {
defm : RVVConvBuiltinSet<"vfncvt_rod_f_f_w", "f", [["v", "vw"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_rod_f_f_w", "x", [["v", "vw"]]>;
}
+ let RequiredFeatures = ["zvfbfa"], OverloadedName = "vfncvt_rod_f_bf16" in
+ defm : RVVConvBuiltinSet<"vfncvt_rod_f_f_w", "y", [["v", "vw"]]>;
}
// Zvfbfmin - Vector convert BF16 to FP32
@@ -1363,11 +1398,15 @@ let ManualCodegen = [{
defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "si", [["Iv", "IvFwu"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["Iv", "IvFwu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["YwIv", "IvYwu"]]>;
}
let OverloadedName = "vfncvt_xu" in {
defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "si", [["Uv", "UvFwu"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["Uv", "UvFwu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["YwUv", "UvYwu"]]>;
}
let OverloadedName = "vfncvt_f" in {
defm : RVVConvBuiltinSet<"vfncvt_f_x_w", "f", [["v", "vIwu"]]>;
@@ -1382,6 +1421,8 @@ let ManualCodegen = [{
let RequiredFeatures = ["zvfhmin"] in
defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "x", [["v", "vwu"]]>;
}
+ let RequiredFeatures = ["zvfbfa"], OverloadedName = "vfncvt_f_bf16" in
+ defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "y", [["v", "vwu"]]>;
}
// Zvfbfmin - Vector convert FP32 to BF16
@@ -1430,11 +1471,15 @@ let ManualCodegen = [{
defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "si", [["Iv", "IvFw"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["Iv", "IvFw"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["YwIv", "IvYw"]]>;
}
let OverloadedName = "vfncvt_xu" in {
defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "si", [["Uv", "UvFw"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["Uv", "UvFw"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["YwUv", "UvYw"]]>;
}
let OverloadedName = "vfncvt_f" in {
defm : RVVConvBuiltinSet<"vfncvt_f_x_w", "f", [["v", "vIw"]]>;
@@ -1449,6 +1494,8 @@ let ManualCodegen = [{
let RequiredFeatures = ["zvfhmin"] in
defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "x", [["v", "vw"]]>;
}
+ let RequiredFeatures = ["zvfbfa"], OverloadedName = "vfncvt_f_bf16" in
+ defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "y", [["v", "vw"]]>;
}
// Zvfbfmin - Vector convert FP32 to BF16
@@ -1578,6 +1625,9 @@ let HasMasked = false, MaskedPolicyScheme = NonePolicy in {
let RequiredFeatures = ["zvfh"] in
defm vfmv_f : RVVOp0BuiltinSet<"vfmv_f_s", "x",
[["s", "ve", "ev"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfmv_f : RVVOp0BuiltinSet<"vfmv_f_s", "y",
+ [["s", "ve", "ev"]]>;
}
let OverloadedName = "vfmv_s",
UnMaskedPolicyScheme = HasPassthruOperand,
@@ -1589,6 +1639,9 @@ let HasMasked = false, MaskedPolicyScheme = NonePolicy in {
defm vfmv_s : RVVOutBuiltinSet<"vfmv_s_f", "x",
[["f", "v", "ve"],
["x", "Uv", "UvUe"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfmv_s : RVVOutBuiltinSet<"vfmv_s_f", "y",
+ [["f", "v", "ve"]]>;
}
}
@@ -1601,11 +1654,11 @@ defm vslidedown : RVVSlideDownBuiltinSet;
// 16.3.3. Vector Slide1up Instructions
let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vslide1up : RVVSlideOneBuiltinSet;
-defm vfslide1up : RVVFloatingBinVFBuiltinSet;
+defm vfslide1up : RVVFloatingBinVFBuiltinSet<HasBF=1>;
// 16.3.4. Vector Slide1down Instruction
defm vslide1down : RVVSlideOneBuiltinSet;
-defm vfslide1down : RVVFloatingBinVFBuiltinSet;
+defm vfslide1down : RVVFloatingBinVFBuiltinSet<HasBF=1>;
// 16.4. Vector Register Gather Instructions
// signed and floating type
diff --git a/clang/include/clang/Basic/riscv_vector_common.td b/clang/include/clang/Basic/riscv_vector_common.td
index 767bcee..eaa2ba4 100644
--- a/clang/include/clang/Basic/riscv_vector_common.td
+++ b/clang/include/clang/Basic/riscv_vector_common.td
@@ -83,6 +83,8 @@
// elements of the same width
// F: given a vector type, compute the vector type with floating-point type
// elements of the same width
+// Y: given a vector type, compute the vector type with bfloat16 type elements
+// of the same width
// S: given a vector type, computes its equivalent one for LMUL=1. This is a
// no-op if the vector was already LMUL=1
// (Log2EEW:Value): Log2EEW value could be 3/4/5/6 (8/16/32/64), given a
@@ -470,6 +472,10 @@ let HasMaskedOffOperand = false in {
defm "" : RVVOutOp1BuiltinSet<NAME, "x",
[["vv", "v", "vvvv"],
["vf", "v", "vvev"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1BuiltinSet<NAME, "y",
+ [["vv", "v", "vvvv"],
+ ["vf", "v", "vvev"]]>;
}
multiclass RVVFloatingTerBuiltinSetRoundingMode {
defm "" : RVVOutOp1BuiltinSet<NAME, "fd",
@@ -479,6 +485,10 @@ let HasMaskedOffOperand = false in {
defm "" : RVVOutOp1BuiltinSet<NAME, "x",
[["vv", "v", "vvvvu"],
["vf", "v", "vvevu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1BuiltinSet<NAME, "y",
+ [["vv", "v", "vvvvu"],
+ ["vf", "v", "vvevu"]]>;
}
}
@@ -491,6 +501,10 @@ let HasMaskedOffOperand = false, Log2LMUL = [-2, -1, 0, 1, 2] in {
defm "" : RVVOutOp1Op2BuiltinSet<NAME, "x",
[["vv", "w", "wwvv"],
["vf", "w", "wwev"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1Op2BuiltinSet<NAME, "y",
+ [["vv", "vw", "wwvv"],
+ ["vf", "vw", "wwev"]]>;
}
multiclass RVVFloatingWidenTerBuiltinSetRoundingMode {
defm "" : RVVOutOp1Op2BuiltinSet<NAME, "f",
@@ -500,10 +514,14 @@ let HasMaskedOffOperand = false, Log2LMUL = [-2, -1, 0, 1, 2] in {
defm "" : RVVOutOp1Op2BuiltinSet<NAME, "x",
[["vv", "w", "wwvvu"],
["vf", "w", "wwevu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1Op2BuiltinSet<NAME, "y",
+ [["vv", "vw", "wwvvu"],
+ ["vf", "vw", "wwevu"]]>;
}
}
-multiclass RVVFloatingBinBuiltinSet {
+multiclass RVVFloatingBinBuiltinSet<bit HasBF = 0> {
defm "" : RVVOutOp1BuiltinSet<NAME, "fd",
[["vv", "v", "vvv"],
["vf", "v", "vve"]]>;
@@ -511,9 +529,15 @@ multiclass RVVFloatingBinBuiltinSet {
defm "" : RVVOutOp1BuiltinSet<NAME, "x",
[["vv", "v", "vvv"],
["vf", "v", "vve"]]>;
+ if HasBF then {
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1BuiltinSet<NAME, "y",
+ [["vv", "v", "vvv"],
+ ["vf", "v", "vve"]]>;
+ }
}
-multiclass RVVFloatingBinBuiltinSetRoundingMode {
+multiclass RVVFloatingBinBuiltinSetRoundingMode<bit HasBF = 0> {
defm "" : RVVOutOp1BuiltinSet<NAME, "fd",
[["vv", "v", "vvvu"],
["vf", "v", "vveu"]]>;
@@ -521,22 +545,38 @@ multiclass RVVFloatingBinBuiltinSetRoundingMode {
defm "" : RVVOutOp1BuiltinSet<NAME, "x",
[["vv", "v", "vvvu"],
["vf", "v", "vveu"]]>;
+ if HasBF then {
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1BuiltinSet<NAME, "y",
+ [["vv", "v", "vvvu"],
+ ["vf", "v", "vveu"]]>;
+ }
}
-multiclass RVVFloatingBinVFBuiltinSet {
+multiclass RVVFloatingBinVFBuiltinSet<bit HasBF = 0> {
defm "" : RVVOutOp1BuiltinSet<NAME, "fd",
[["vf", "v", "vve"]]>;
let RequiredFeatures = ["zvfh"] in
defm "" : RVVOutOp1BuiltinSet<NAME, "x",
[["vf", "v", "vve"]]>;
+ if HasBF then {
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1BuiltinSet<NAME, "y",
+ [["vf", "v", "vve"]]>;
+ }
}
-multiclass RVVFloatingBinVFBuiltinSetRoundingMode {
+multiclass RVVFloatingBinVFBuiltinSetRoundingMode<bit HasBF = 0> {
defm "" : RVVOutOp1BuiltinSet<NAME, "fd",
[["vf", "v", "vveu"]]>;
let RequiredFeatures = ["zvfh"] in
defm "" : RVVOutOp1BuiltinSet<NAME, "x",
[["vf", "v", "vveu"]]>;
+ if HasBF then {
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1BuiltinSet<NAME, "y",
+ [["vf", "v", "vveu"]]>;
+ }
}
multiclass RVVFloatingMaskOutBuiltinSet {
@@ -547,6 +587,10 @@ multiclass RVVFloatingMaskOutBuiltinSet {
defm "" : RVVOp0Op1BuiltinSet<NAME, "x",
[["vv", "vm", "mvv"],
["vf", "vm", "mve"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOp0Op1BuiltinSet<NAME, "y",
+ [["vv", "vm", "mvv"],
+ ["vf", "vm", "mve"]]>;
}
multiclass RVVFloatingMaskOutVFBuiltinSet
@@ -748,6 +792,10 @@ multiclass RVVFloatingWidenBinBuiltinSet {
defm "" : RVVWidenBuiltinSet<NAME, "x",
[["vv", "w", "wvv"],
["vf", "w", "wve"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVWidenBuiltinSet<NAME, "y",
+ [["vv", "vw", "wvv"],
+ ["vf", "vw", "wve"]]>;
}
multiclass RVVFloatingWidenBinBuiltinSetRoundingMode {
@@ -758,6 +806,10 @@ multiclass RVVFloatingWidenBinBuiltinSetRoundingMode {
defm "" : RVVWidenBuiltinSet<NAME, "x",
[["vv", "w", "wvvu"],
["vf", "w", "wveu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVWidenBuiltinSet<NAME, "y",
+ [["vv", "vw", "wvvu"],
+ ["vf", "vw", "wveu"]]>;
}
multiclass RVVFloatingWidenOp0BinBuiltinSet {
@@ -768,6 +820,10 @@ multiclass RVVFloatingWidenOp0BinBuiltinSet {
defm "" : RVVWidenWOp0BuiltinSet<NAME # "_w", "x",
[["wv", "w", "wwv"],
["wf", "w", "wwe"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVWidenWOp0BuiltinSet<NAME # "_w", "y",
+ [["wv", "vw", "wwv"],
+ ["wf", "ew", "wwe"]]>;
}
multiclass RVVFloatingWidenOp0BinBuiltinSetRoundingMode {
@@ -778,4 +834,8 @@ multiclass RVVFloatingWidenOp0BinBuiltinSetRoundingMode {
defm "" : RVVWidenWOp0BuiltinSet<NAME # "_w", "x",
[["wv", "w", "wwvu"],
["wf", "w", "wweu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVWidenWOp0BuiltinSet<NAME # "_w", "y",
+ [["wv", "vw", "wwvu"],
+ ["wf", "ew", "wweu"]]>;
}
diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td
index 69dbad3..1e0fb03 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td
@@ -967,6 +967,49 @@ def CIR_TypeInfoAttr : CIR_Attr<"TypeInfo", "typeinfo", [TypedAttrInterface]> {
`<` custom<RecordMembers>($data) `>`
}];
}
+//===----------------------------------------------------------------------===//
+// InlineAttr
+//===----------------------------------------------------------------------===//
+
+def CIR_InlineKind : CIR_I32EnumAttr<"InlineKind", "inlineKind", [
+ I32EnumAttrCase<"NoInline", 1, "never">,
+ I32EnumAttrCase<"AlwaysInline", 2, "always">,
+ I32EnumAttrCase<"InlineHint", 3, "hint">
+]> {
+ let genSpecializedAttr = 0;
+}
+
+def CIR_InlineAttr : CIR_EnumAttr<CIR_InlineKind, "inline"> {
+ let summary = "Inline attribute";
+ let description = [{
+ Inline attribute represents user directives for inlining behavior.
+ This attribute is only used by `cir.func` operations.
+
+ Values:
+ - `never`: Prevents the function from being inlined (__attribute__((noinline)))
+ - `always`: Forces the function to be inlined (__attribute__((always_inline)))
+ - `hint`: Suggests the function should be inlined (inline keyword)
+
+ Example:
+ ```
+ cir.func @noinline_func(%arg0: !s32i) -> !s32i inline(never) {
+ cir.return %arg0 : !s32i
+ }
+ cir.func @always_inline_func() -> !s32i inline(always) {
+ %0 = cir.const #cir.int<42> : !s32i
+ cir.return %0 : !s32i
+ }
+ ```
+ }];
+
+ let cppClassName = "InlineAttr";
+
+ let extraClassDeclaration = [{
+    bool isNoInline() const { return getValue() == InlineKind::NoInline; }
+    bool isAlwaysInline() const { return getValue() == InlineKind::AlwaysInline; }
+    bool isInlineHint() const { return getValue() == InlineKind::InlineHint; }
+ }];
+}
//===----------------------------------------------------------------------===//
// CatchAllAttr & UnwindAttr
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index 3988a6d..e0163a4 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -2476,6 +2476,10 @@ def CIR_FuncOp : CIR_Op<"func", [
Similarly, for global destructors both `global_dtor` and
`global_dtor(<priority>)` are available.
+ The `inline(never)` keyword marks a function that should not be inlined.
+ The `inline(always)` keyword marks a function that should always be inlined.
+ The `inline(hint)` keyword suggests that the function should be inlined.
+
Example:
```mlir
@@ -2510,6 +2514,7 @@ def CIR_FuncOp : CIR_Op<"func", [
UnitAttr:$dso_local,
DefaultValuedAttr<CIR_GlobalLinkageKind,
"cir::GlobalLinkageKind::ExternalLinkage">:$linkage,
+ OptionalAttr<CIR_InlineAttr>:$inline_kind,
OptionalAttr<StrAttr>:$sym_visibility,
UnitAttr:$comdat,
OptionalAttr<DictArrayAttr>:$arg_attrs,
@@ -4403,12 +4408,12 @@ def CIR_TryOp : CIR_Op<"try",[
let arguments = (ins
UnitAttr:$synthetic,
UnitAttr:$cleanup,
- CIR_TryHandlerArrayAttr:$handler_types
+ DefaultValuedAttr<CIR_TryHandlerArrayAttr, "{}">:$handler_types
);
let regions = (region
AnyRegion:$try_region,
- VariadicRegion<MinSizedRegion<1>>:$handler_regions
+ VariadicRegion<AnyRegion>:$handler_regions
);
let assemblyFormat = [{
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h b/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h
index 17fddae..dbd0304 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h
+++ b/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h
@@ -54,10 +54,10 @@ static bool isLocalLinkage(GlobalLinkageKind linkage) {
static bool isExternalWeakLinkage(GlobalLinkageKind linkage) {
return linkage == GlobalLinkageKind::ExternalWeakLinkage;
}
-LLVM_ATTRIBUTE_UNUSED static bool isCommonLinkage(GlobalLinkageKind linkage) {
+[[maybe_unused]] static bool isCommonLinkage(GlobalLinkageKind linkage) {
return linkage == GlobalLinkageKind::CommonLinkage;
}
-LLVM_ATTRIBUTE_UNUSED static bool
+[[maybe_unused]] static bool
isValidDeclarationLinkage(GlobalLinkageKind linkage) {
return isExternalWeakLinkage(linkage) || isExternalLinkage(linkage);
}
@@ -65,8 +65,7 @@ isValidDeclarationLinkage(GlobalLinkageKind linkage) {
/// Whether the definition of this global may be replaced by something
/// non-equivalent at link time. For example, if a function has weak linkage
/// then the code defining it may be replaced by different code.
-LLVM_ATTRIBUTE_UNUSED static bool
-isInterposableLinkage(GlobalLinkageKind linkage) {
+[[maybe_unused]] static bool isInterposableLinkage(GlobalLinkageKind linkage) {
switch (linkage) {
case GlobalLinkageKind::WeakAnyLinkage:
case GlobalLinkageKind::LinkOnceAnyLinkage:
@@ -89,8 +88,7 @@ isInterposableLinkage(GlobalLinkageKind linkage) {
/// Whether the definition of this global may be discarded if it is not used
/// in its compilation unit.
-LLVM_ATTRIBUTE_UNUSED static bool
-isDiscardableIfUnused(GlobalLinkageKind linkage) {
+[[maybe_unused]] static bool isDiscardableIfUnused(GlobalLinkageKind linkage) {
return isLinkOnceLinkage(linkage) || isLocalLinkage(linkage) ||
isAvailableExternallyLinkage(linkage);
}
@@ -99,7 +97,7 @@ isDiscardableIfUnused(GlobalLinkageKind linkage) {
/// Using this method outside of the code generators is almost always a
/// mistake: when working at the IR level use isInterposable instead as it
/// knows about ODR semantics.
-LLVM_ATTRIBUTE_UNUSED static bool isWeakForLinker(GlobalLinkageKind linkage) {
+[[maybe_unused]] static bool isWeakForLinker(GlobalLinkageKind linkage) {
return linkage == GlobalLinkageKind::WeakAnyLinkage ||
linkage == GlobalLinkageKind::WeakODRLinkage ||
linkage == GlobalLinkageKind::LinkOnceAnyLinkage ||
@@ -108,7 +106,7 @@ LLVM_ATTRIBUTE_UNUSED static bool isWeakForLinker(GlobalLinkageKind linkage) {
linkage == GlobalLinkageKind::ExternalWeakLinkage;
}
-LLVM_ATTRIBUTE_UNUSED static bool isValidLinkage(GlobalLinkageKind gl) {
+[[maybe_unused]] static bool isValidLinkage(GlobalLinkageKind gl) {
return isExternalLinkage(gl) || isLocalLinkage(gl) || isWeakLinkage(gl) ||
isLinkOnceLinkage(gl);
}
diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index de3bc94..090cf35 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -69,24 +69,31 @@ struct MissingFeatures {
static bool opAllocaCaptureByInit() { return false; }
// FuncOp handling
- static bool opFuncOpenCLKernelMetadata() { return false; }
+ static bool opFuncArmNewAttr() { return false; }
+ static bool opFuncArmStreamingAttr() { return false; }
static bool opFuncAstDeclAttr() { return false; }
- static bool opFuncAttributesForDefinition() { return false; }
static bool opFuncCallingConv() { return false; }
+ static bool opFuncColdHotAttr() { return false; }
static bool opFuncCPUAndFeaturesAttributes() { return false; }
static bool opFuncExceptions() { return false; }
static bool opFuncExtraAttrs() { return false; }
static bool opFuncMaybeHandleStaticInExternC() { return false; }
+ static bool opFuncMinSizeAttr() { return false; }
static bool opFuncMultipleReturnVals() { return false; }
+ static bool opFuncNakedAttr() { return false; }
+ static bool opFuncNoDuplicateAttr() { return false; }
static bool opFuncNoUnwind() { return false; }
+ static bool opFuncOpenCLKernelMetadata() { return false; }
static bool opFuncOperandBundles() { return false; }
+ static bool opFuncOptNoneAttr() { return false; }
static bool opFuncParameterAttributes() { return false; }
static bool opFuncReadOnly() { return false; }
static bool opFuncSection() { return false; }
+ static bool opFuncUnwindTablesAttr() { return false; }
static bool opFuncWillReturn() { return false; }
static bool opFuncNoReturn() { return false; }
- static bool setLLVMFunctionFEnvAttributes() { return false; }
static bool setFunctionAttributes() { return false; }
+ static bool setLLVMFunctionFEnvAttributes() { return false; }
// CallOp handling
static bool opCallAggregateArgs() { return false; }
@@ -271,6 +278,7 @@ struct MissingFeatures {
static bool objCBlocks() { return false; }
static bool objCGC() { return false; }
static bool objCLifetime() { return false; }
+ static bool hlsl() { return false; }
static bool openCL() { return false; }
static bool openMP() { return false; }
static bool opTBAA() { return false; }
@@ -288,6 +296,7 @@ struct MissingFeatures {
static bool sourceLanguageCases() { return false; }
static bool stackBase() { return false; }
static bool stackSaveOp() { return false; }
+ static bool stackProtector() { return false; }
static bool targetCIRGenInfoArch() { return false; }
static bool targetCIRGenInfoOS() { return false; }
static bool targetCodeGenInfoGetNullPointer() { return false; }
diff --git a/clang/include/clang/Format/Format.h b/clang/include/clang/Format/Format.h
index 3df5b92..2852c4a 100644
--- a/clang/include/clang/Format/Format.h
+++ b/clang/include/clang/Format/Format.h
@@ -94,7 +94,7 @@ struct FormatStyle {
///
/// \note
/// This currently only applies to braced initializer lists (when
- /// ``Cpp11BracedListStyle`` is ``true``) and parentheses.
+ /// ``Cpp11BracedListStyle`` is not ``Block``) and parentheses.
/// \endnote
BAS_BlockIndent,
};
@@ -2555,29 +2555,67 @@ struct FormatStyle {
/// \version 3.7
unsigned ContinuationIndentWidth;
- /// If ``true``, format braced lists as best suited for C++11 braced
- /// lists.
- ///
- /// Important differences:
- ///
- /// * No spaces inside the braced list.
- /// * No line break before the closing brace.
- /// * Indentation with the continuation indent, not with the block indent.
- ///
- /// Fundamentally, C++11 braced lists are formatted exactly like function
- /// calls would be formatted in their place. If the braced list follows a name
- /// (e.g. a type or variable name), clang-format formats as if the ``{}`` were
- /// the parentheses of a function call with that name. If there is no name,
- /// a zero-length name is assumed.
- /// \code
- /// true: false:
- /// vector<int> x{1, 2, 3, 4}; vs. vector<int> x{ 1, 2, 3, 4 };
- /// vector<T> x{{}, {}, {}, {}}; vector<T> x{ {}, {}, {}, {} };
- /// f(MyMap[{composite, key}]); f(MyMap[{ composite, key }]);
- /// new int[3]{1, 2, 3}; new int[3]{ 1, 2, 3 };
- /// \endcode
+ /// Different ways to handle braced lists.
+ enum BracedListStyle : int8_t {
+ /// Best suited for pre C++11 braced lists.
+ ///
+ /// * Spaces inside the braced list.
+ /// * Line break before the closing brace.
+ /// * Indentation with the block indent.
+ ///
+ /// \code
+ /// vector<int> x{ 1, 2, 3, 4 };
+ /// vector<T> x{ {}, {}, {}, {} };
+ /// f(MyMap[{ composite, key }]);
+ /// new int[3]{ 1, 2, 3 };
+ /// Type name{ // Comment
+ /// value
+ /// };
+ /// \endcode
+ BLS_Block,
+ /// Best suited for C++11 braced lists.
+ ///
+ /// * No spaces inside the braced list.
+ /// * No line break before the closing brace.
+ /// * Indentation with the continuation indent.
+ ///
+ /// Fundamentally, C++11 braced lists are formatted exactly like function
+ /// calls would be formatted in their place. If the braced list follows a
+ /// name (e.g. a type or variable name), clang-format formats as if the
+ /// ``{}`` were the parentheses of a function call with that name. If there
+ /// is no name, a zero-length name is assumed.
+ /// \code
+ /// vector<int> x{1, 2, 3, 4};
+ /// vector<T> x{{}, {}, {}, {}};
+ /// f(MyMap[{composite, key}]);
+ /// new int[3]{1, 2, 3};
+ /// Type name{ // Comment
+ /// value};
+ /// \endcode
+ BLS_FunctionCall,
+  /// Same as ``FunctionCall``, except for the handling of a comment at the
+  /// beginning: everything that follows is aligned with that comment.
+ ///
+ /// * No spaces inside the braced list. (Even for a comment at the first
+ /// position.)
+ /// * No line break before the closing brace.
+ /// * Indentation with the continuation indent, except when followed by a
+ /// line comment, then it uses the block indent.
+ ///
+ /// \code
+ /// vector<int> x{1, 2, 3, 4};
+ /// vector<T> x{{}, {}, {}, {}};
+ /// f(MyMap[{composite, key}]);
+ /// new int[3]{1, 2, 3};
+ /// Type name{// Comment
+ /// value};
+ /// \endcode
+ BLS_AlignFirstComment,
+ };
+
+ /// The style to handle braced lists.
/// \version 3.4
- bool Cpp11BracedListStyle;
+ BracedListStyle Cpp11BracedListStyle;
/// This option is **deprecated**. See ``DeriveLF`` and ``DeriveCRLF`` of
/// ``LineEnding``.
@@ -4933,7 +4971,7 @@ struct FormatStyle {
/// Specifies when to insert a space in empty braces.
/// \note
/// This option doesn't apply to initializer braces if
- /// ``Cpp11BracedListStyle`` is set to ``true``.
+ /// ``Cpp11BracedListStyle`` is not ``Block``.
/// \endnote
/// \version 22
SpaceInEmptyBracesStyle SpaceInEmptyBraces;
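
A small sketch of exercising the new enumerator through the libFormat API; the
``BLS_AlignFirstComment`` value comes from the hunk above, while the driver program
itself is illustrative only:

    #include "clang/Format/Format.h"
    #include "llvm/Support/Error.h"
    #include "llvm/Support/raw_ostream.h"

    int main() {
      using namespace clang;
      format::FormatStyle Style = format::getLLVMStyle();
      Style.Cpp11BracedListStyle = format::FormatStyle::BLS_AlignFirstComment;

      llvm::StringRef Code = "std::vector<int> x{// Comment\n1, 2, 3};\n";
      tooling::Replacements R =
          format::reformat(Style, Code, {tooling::Range(0, Code.size())});
      llvm::outs() << llvm::cantFail(tooling::applyAllReplacements(Code, R));
    }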
diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h
index 37598f8..add4c15 100644
--- a/clang/include/clang/Sema/Sema.h
+++ b/clang/include/clang/Sema/Sema.h
@@ -3961,6 +3961,13 @@ public:
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = {});
+private:
+ // Perform a check on an AsmLabel to verify its consistency and emit
+ // diagnostics in case of an error.
+ void CheckAsmLabel(Scope *S, Expr *AsmLabelExpr, StorageClass SC,
+ TypeSourceInfo *TInfo, VarDecl *);
+
+public:
/// Perform semantic checking on a newly-created variable
/// declaration.
///
@@ -5010,6 +5017,14 @@ public:
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks, Expr *MaxBlocks);
+ /// Add a cluster_dims attribute to a particular declaration.
+ CUDAClusterDimsAttr *createClusterDimsAttr(const AttributeCommonInfo &CI,
+ Expr *X, Expr *Y, Expr *Z);
+ void addClusterDimsAttr(Decl *D, const AttributeCommonInfo &CI, Expr *X,
+ Expr *Y, Expr *Z);
+ /// Add a no_cluster attribute to a particular declaration.
+ void addNoClusterAttr(Decl *D, const AttributeCommonInfo &CI);
+
enum class RetainOwnershipKind { NS, CF, OS };
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
@@ -13385,6 +13400,13 @@ public:
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Outputs);
+ /// Substitute concept template arguments in the constraint expression
+ /// of a concept-id. This is used to implement [temp.constr.normal].
+ ExprResult
+ SubstConceptTemplateArguments(const ConceptSpecializationExpr *CSE,
+ const Expr *ConstraintExpr,
+ const MultiLevelTemplateArgumentList &MLTAL);
+
bool SubstTemplateArgumentsInParameterMapping(
ArrayRef<TemplateArgumentLoc> Args, SourceLocation BaseLoc,
const MultiLevelTemplateArgumentList &TemplateArgs,
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
index c233ca1..4aee165 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
@@ -211,6 +211,16 @@ protected:
getExtraInvalidatedValues(ValueList &Values,
RegionAndSymbolInvalidationTraits *ETraits) const {}
+ /// A state for looking up relevant Environment entries (arguments, return
+ /// value), dynamic type information and similar "stable" things.
+ /// WARNING: During the evaluation of a function call, several state
+ /// transitions happen, so this state can become partially obsolete!
+ ///
+ /// TODO: Instead of storing a complete state object in the CallEvent, only
+ /// store the relevant parts (such as argument/return SVals etc.) that aren't
+ /// allowed to become obsolete until the end of the call evaluation.
+ ProgramStateRef getState() const { return State; }
+
public:
CallEvent &operator=(const CallEvent &) = delete;
virtual ~CallEvent() = default;
@@ -231,8 +241,11 @@ public:
}
void setForeign(bool B) const { Foreign = B; }
- /// The state in which the call is being evaluated.
- const ProgramStateRef &getState() const { return State; }
+ /// NOTE: There are plans for refactoring that would eliminate this method.
+ /// Prefer to use CheckerContext::getASTContext if possible!
+ const ASTContext &getASTContext() const {
+ return getState()->getStateManager().getContext();
+ }
/// The context in which the call is being evaluated.
const LocationContext *getLocationContext() const { return LCtx; }
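
A hedged sketch of the new accessor from code that only has the ``CallEvent`` at hand; as
the comment above notes, ``CheckerContext::getASTContext()`` stays the preferred spelling
inside checker callbacks:

    #include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"

    // Illustrative helper, not part of the patch.
    static bool isCallInCXX(const clang::ento::CallEvent &Call) {
      return Call.getASTContext().getLangOpts().CPlusPlus;
    }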
diff --git a/clang/lib/AST/ByteCode/Interp.h b/clang/lib/AST/ByteCode/Interp.h
index 812d25f..2f7e2d9 100644
--- a/clang/lib/AST/ByteCode/Interp.h
+++ b/clang/lib/AST/ByteCode/Interp.h
@@ -2258,6 +2258,8 @@ std::optional<Pointer> OffsetHelper(InterpState &S, CodePtr OpPC,
S.CCEDiag(S.Current->getSource(OpPC), diag::note_constexpr_array_index)
<< N << /*non-array*/ true << 0;
return Pointer(Ptr.asFunctionPointer().getFunction(), N);
+ } else if (!Ptr.isBlockPointer()) {
+ return std::nullopt;
}
assert(Ptr.isBlockPointer());
diff --git a/clang/lib/AST/ByteCode/InterpBlock.cpp b/clang/lib/AST/ByteCode/InterpBlock.cpp
index ac6f01f..24825ad 100644
--- a/clang/lib/AST/ByteCode/InterpBlock.cpp
+++ b/clang/lib/AST/ByteCode/InterpBlock.cpp
@@ -100,6 +100,19 @@ bool Block::hasPointer(const Pointer *P) const {
}
#endif
+void Block::movePointersTo(Block *B) {
+ assert(B != this);
+
+ while (Pointers) {
+ Pointer *P = Pointers;
+
+ this->removePointer(P);
+ P->BS.Pointee = B;
+ B->addPointer(P);
+ }
+ assert(!this->hasPointers());
+}
+
DeadBlock::DeadBlock(DeadBlock *&Root, Block *Blk)
: Root(Root), B(~0u, Blk->Desc, Blk->isExtern(), Blk->IsStatic,
Blk->isWeak(), Blk->isDummy(), /*IsDead=*/true) {
diff --git a/clang/lib/AST/ByteCode/InterpBlock.h b/clang/lib/AST/ByteCode/InterpBlock.h
index 9b3dadc..73fdc8d 100644
--- a/clang/lib/AST/ByteCode/InterpBlock.h
+++ b/clang/lib/AST/ByteCode/InterpBlock.h
@@ -92,6 +92,8 @@ public:
bool isInitialized() const { return IsInitialized; }
/// The Evaluation ID this block was created in.
unsigned getEvalID() const { return EvalID; }
+  /// Move all pointers from this block to \p B.
+ void movePointersTo(Block *B);
/// Returns a pointer to the stored data.
/// You are allowed to read Desc->getSize() bytes from this address.
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index a0d2c76..0cb4910 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -23,7 +23,7 @@
namespace clang {
namespace interp {
-LLVM_ATTRIBUTE_UNUSED static bool isNoopBuiltin(unsigned ID) {
+[[maybe_unused]] static bool isNoopBuiltin(unsigned ID) {
switch (ID) {
case Builtin::BIas_const:
case Builtin::BIforward:
@@ -3285,14 +3285,14 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case Builtin::BI__builtin_parityl:
case Builtin::BI__builtin_parityll:
return interp__builtin_elementwise_int_unaryop(
- S, OpPC, Call, [](const APSInt &Val) -> APInt {
+ S, OpPC, Call, [](const APSInt &Val) {
return APInt(Val.getBitWidth(), Val.popcount() % 2);
});
case Builtin::BI__builtin_clrsb:
case Builtin::BI__builtin_clrsbl:
case Builtin::BI__builtin_clrsbll:
return interp__builtin_elementwise_int_unaryop(
- S, OpPC, Call, [](const APSInt &Val) -> APInt {
+ S, OpPC, Call, [](const APSInt &Val) {
return APInt(Val.getBitWidth(),
Val.getBitWidth() - Val.getSignificantBits());
});
@@ -3301,8 +3301,7 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case Builtin::BI__builtin_bitreverse32:
case Builtin::BI__builtin_bitreverse64:
return interp__builtin_elementwise_int_unaryop(
- S, OpPC, Call,
- [](const APSInt &Val) -> APInt { return Val.reverseBits(); });
+ S, OpPC, Call, [](const APSInt &Val) { return Val.reverseBits(); });
case Builtin::BI__builtin_classify_type:
return interp__builtin_classify_type(S, OpPC, Frame, Call);
@@ -3622,6 +3621,15 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
return LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
});
+ case clang::X86::BI__builtin_ia32_pmulhrsw128:
+ case clang::X86::BI__builtin_ia32_pmulhrsw256:
+ case clang::X86::BI__builtin_ia32_pmulhrsw512:
+ return interp__builtin_elementwise_int_binop(
+ S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
+ return (llvm::APIntOps::mulsExtended(LHS, RHS).ashr(14) + 1)
+ .extractBits(16, 1);
+ });
+
case clang::X86::BI__builtin_ia32_pavgb128:
case clang::X86::BI__builtin_ia32_pavgw128:
case clang::X86::BI__builtin_ia32_pavgb256:
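
Both the bytecode interpreter here and the constant evaluator below (ExprConstant.cpp) fold the pmulhrsw builtins lane-wise with (mulsExtended(LHS, RHS).ashr(14) + 1).extractBits(16, 1), i.e. a Q15 fixed-point multiply with round-to-nearest. A scalar reference of the same computation on plain integers (illustrative, not the in-tree code):

    #include <cstdint>
    // One 16-bit lane of PMULHRSW: take the 32-bit product, shift right by 14,
    // add 1, drop the low bit. Equivalent to round((a * b) / 2^15) in Q15.
    static int16_t mulhrs(int16_t a, int16_t b) {
      int32_t prod = int32_t(a) * int32_t(b);
      return int16_t(((prod >> 14) + 1) >> 1);
    }
    // Example: 0x4000 (0.5) * 0x2000 (0.25) -> 0x1000 (0.125).
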
diff --git a/clang/lib/AST/ByteCode/Program.cpp b/clang/lib/AST/ByteCode/Program.cpp
index 75bfd9f..e653782 100644
--- a/clang/lib/AST/ByteCode/Program.cpp
+++ b/clang/lib/AST/ByteCode/Program.cpp
@@ -226,11 +226,7 @@ UnsignedOrNone Program::createGlobal(const ValueDecl *VD, const Expr *Init) {
Globals[PIdx] = NewGlobal;
// All pointers pointing to the previous extern decl now point to the
// new decl.
- for (Pointer *Ptr = RedeclBlock->Pointers; Ptr; Ptr = Ptr->BS.Next) {
- RedeclBlock->removePointer(Ptr);
- Ptr->BS.Pointee = NewGlobal->block();
- NewGlobal->block()->addPointer(Ptr);
- }
+ RedeclBlock->movePointersTo(NewGlobal->block());
}
}
PIdx = *Idx;
diff --git a/clang/lib/AST/Comment.cpp b/clang/lib/AST/Comment.cpp
index 37e21c3..361a8a7e 100644
--- a/clang/lib/AST/Comment.cpp
+++ b/clang/lib/AST/Comment.cpp
@@ -56,16 +56,16 @@ good implements_child_begin_end(Comment::child_iterator (T::*)() const) {
return good();
}
-LLVM_ATTRIBUTE_UNUSED
-static inline bad implements_child_begin_end(
- Comment::child_iterator (Comment::*)() const) {
+[[maybe_unused]]
+static inline bad
+implements_child_begin_end(Comment::child_iterator (Comment::*)() const) {
return bad();
}
#define ASSERT_IMPLEMENTS_child_begin(function) \
(void) good(implements_child_begin_end(function))
-LLVM_ATTRIBUTE_UNUSED
+[[maybe_unused]]
static inline void CheckCommentASTNodes() {
#define ABSTRACT_COMMENT(COMMENT)
#define COMMENT(CLASS, PARENT) \
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 16141b2..e308c17 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -11819,6 +11819,14 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
case clang::X86::BI__builtin_ia32_pavgw512:
return EvaluateBinOpExpr(llvm::APIntOps::avgCeilU);
+ case clang::X86::BI__builtin_ia32_pmulhrsw128:
+ case clang::X86::BI__builtin_ia32_pmulhrsw256:
+ case clang::X86::BI__builtin_ia32_pmulhrsw512:
+ return EvaluateBinOpExpr([](const APSInt &LHS, const APSInt &RHS) {
+ return (llvm::APIntOps::mulsExtended(LHS, RHS).ashr(14) + 1)
+ .extractBits(16, 1);
+ });
+
case clang::X86::BI__builtin_ia32_pmaddubsw128:
case clang::X86::BI__builtin_ia32_pmaddubsw256:
case clang::X86::BI__builtin_ia32_pmaddubsw512:
diff --git a/clang/lib/AST/Stmt.cpp b/clang/lib/AST/Stmt.cpp
index 9ae8aea..11ece49 100644
--- a/clang/lib/AST/Stmt.cpp
+++ b/clang/lib/AST/Stmt.cpp
@@ -252,7 +252,7 @@ namespace {
template <class T> good implements_children(children_t T::*) {
return good();
}
- LLVM_ATTRIBUTE_UNUSED
+ [[maybe_unused]]
static bad implements_children(children_t Stmt::*) {
return bad();
}
@@ -261,15 +261,19 @@ namespace {
template <class T> good implements_getBeginLoc(getBeginLoc_t T::*) {
return good();
}
- LLVM_ATTRIBUTE_UNUSED
- static bad implements_getBeginLoc(getBeginLoc_t Stmt::*) { return bad(); }
+ [[maybe_unused]]
+ static bad implements_getBeginLoc(getBeginLoc_t Stmt::*) {
+ return bad();
+ }
typedef SourceLocation getLocEnd_t() const;
template <class T> good implements_getEndLoc(getLocEnd_t T::*) {
return good();
}
- LLVM_ATTRIBUTE_UNUSED
- static bad implements_getEndLoc(getLocEnd_t Stmt::*) { return bad(); }
+ [[maybe_unused]]
+ static bad implements_getEndLoc(getLocEnd_t Stmt::*) {
+ return bad();
+ }
#define ASSERT_IMPLEMENTS_children(type) \
(void) is_good(implements_children(&type::children))
@@ -282,7 +286,7 @@ namespace {
/// Check whether the various Stmt classes implement their member
/// functions.
-LLVM_ATTRIBUTE_UNUSED
+[[maybe_unused]]
static inline void check_implementations() {
#define ABSTRACT_STMT(type)
#define STMT(type, base) \
diff --git a/clang/lib/AST/StmtPrinter.cpp b/clang/lib/AST/StmtPrinter.cpp
index 586c300..ff8ca01 100644
--- a/clang/lib/AST/StmtPrinter.cpp
+++ b/clang/lib/AST/StmtPrinter.cpp
@@ -151,11 +151,11 @@ namespace {
else StmtVisitor<StmtPrinter>::Visit(S);
}
- void VisitStmt(Stmt *Node) LLVM_ATTRIBUTE_UNUSED {
+ [[maybe_unused]] void VisitStmt(Stmt *Node) {
Indent() << "<<unknown stmt type>>" << NL;
}
- void VisitExpr(Expr *Node) LLVM_ATTRIBUTE_UNUSED {
+ [[maybe_unused]] void VisitExpr(Expr *Node) {
OS << "<<unknown expr type>>";
}
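
The LLVM_ATTRIBUTE_UNUSED to [[maybe_unused]] churn in this and the surrounding files is purely mechanical: the standard C++17 attribute replaces the macro (which expands to a GNU-style attribute on most hosts), and it is written before the declaration rather than trailing it. A tiny before/after sketch on a hypothetical function:

    // Before: macro, sometimes placed after the declarator.
    //   static void debugDump() LLVM_ATTRIBUTE_UNUSED;
    // After: standard attribute in leading position.
    [[maybe_unused]] static void debugDump() {}
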
diff --git a/clang/lib/AST/TemplateBase.cpp b/clang/lib/AST/TemplateBase.cpp
index 76f96fb..131ae6e 100644
--- a/clang/lib/AST/TemplateBase.cpp
+++ b/clang/lib/AST/TemplateBase.cpp
@@ -340,13 +340,14 @@ bool TemplateArgument::isPackExpansion() const {
}
bool TemplateArgument::isConceptOrConceptTemplateParameter() const {
- if (getKind() == TemplateArgument::Template) {
- if (isa<ConceptDecl>(getAsTemplate().getAsTemplateDecl()))
- return true;
- else if (auto *TTP = dyn_cast_if_present<TemplateTemplateParmDecl>(
- getAsTemplate().getAsTemplateDecl()))
- return TTP->templateParameterKind() == TNK_Concept_template;
- }
+ if (getKind() != TemplateArgument::Template)
+ return false;
+
+ if (isa_and_nonnull<ConceptDecl>(getAsTemplate().getAsTemplateDecl()))
+ return true;
+ if (auto *TTP = llvm::dyn_cast_or_null<TemplateTemplateParmDecl>(
+ getAsTemplate().getAsTemplateDecl()))
+ return TTP->templateParameterKind() == TNK_Concept_template;
return false;
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp
index 274d11b..171ce1c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp
@@ -171,7 +171,8 @@ cir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl gd) {
curCGF = nullptr;
setNonAliasAttributes(gd, fn);
- assert(!cir::MissingFeatures::opFuncAttributesForDefinition());
+ setCIRFunctionAttributesForDefinition(mlir::cast<FunctionDecl>(gd.getDecl()),
+ fn);
return fn;
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
index 81e5fe2..19ed656 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
@@ -871,7 +871,7 @@ bool ConstRecordBuilder::updateRecord(ConstantEmitter &emitter,
class ConstExprEmitter
: public StmtVisitor<ConstExprEmitter, mlir::Attribute, QualType> {
CIRGenModule &cgm;
- LLVM_ATTRIBUTE_UNUSED ConstantEmitter &emitter;
+ [[maybe_unused]] ConstantEmitter &emitter;
public:
ConstExprEmitter(ConstantEmitter &emitter)
diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
index d54d2e9..c184d4a 100644
--- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
@@ -950,8 +950,7 @@ const char *vTableClassNameForType(const CIRGenModule &cgm, const Type *ty) {
break;
case Type::Enum:
- cgm.errorNYI("VTableClassNameForType: Enum");
- break;
+ return "_ZTVN10__cxxabiv116__enum_type_infoE";
case Type::Record: {
const auto *rd = cast<CXXRecordDecl>(cast<RecordType>(ty)->getDecl())
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index 57c7a44..127f763 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -451,7 +451,7 @@ void CIRGenModule::emitGlobalFunctionDefinition(clang::GlobalDecl gd,
curCGF = nullptr;
setNonAliasAttributes(gd, funcOp);
- assert(!cir::MissingFeatures::opFuncAttributesForDefinition());
+ setCIRFunctionAttributesForDefinition(funcDecl, funcOp);
auto getPriority = [this](const auto *attr) -> int {
Expr *e = attr->getPriority();
@@ -1919,6 +1919,91 @@ void CIRGenModule::setFunctionAttributes(GlobalDecl globalDecl,
}
}
+void CIRGenModule::setCIRFunctionAttributesForDefinition(
+ const clang::FunctionDecl *decl, cir::FuncOp f) {
+ assert(!cir::MissingFeatures::opFuncUnwindTablesAttr());
+ assert(!cir::MissingFeatures::stackProtector());
+
+ std::optional<cir::InlineKind> existingInlineKind = f.getInlineKind();
+ bool isNoInline =
+ existingInlineKind && *existingInlineKind == cir::InlineKind::NoInline;
+ bool isAlwaysInline = existingInlineKind &&
+ *existingInlineKind == cir::InlineKind::AlwaysInline;
+
+ if (!decl) {
+ assert(!cir::MissingFeatures::hlsl());
+
+ if (!isAlwaysInline &&
+ codeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) {
+ // If inlining is disabled and we don't have a declaration to control
+ // inlining, mark the function as 'noinline' unless it is explicitly
+ // marked as 'alwaysinline'.
+ f.setInlineKindAttr(
+ cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline));
+ }
+
+ return;
+ }
+
+ assert(!cir::MissingFeatures::opFuncArmStreamingAttr());
+ assert(!cir::MissingFeatures::opFuncArmNewAttr());
+ assert(!cir::MissingFeatures::opFuncOptNoneAttr());
+ assert(!cir::MissingFeatures::opFuncMinSizeAttr());
+ assert(!cir::MissingFeatures::opFuncNakedAttr());
+ assert(!cir::MissingFeatures::opFuncNoDuplicateAttr());
+ assert(!cir::MissingFeatures::hlsl());
+
+ // Handle inline attributes
+ if (decl->hasAttr<NoInlineAttr>() && !isAlwaysInline) {
+ // Add noinline if the function isn't always_inline.
+ f.setInlineKindAttr(
+ cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline));
+ } else if (decl->hasAttr<AlwaysInlineAttr>() && !isNoInline) {
+ // Don't override AlwaysInline with NoInline, or vice versa, since we can't
+ // specify both in IR.
+ f.setInlineKindAttr(
+ cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::AlwaysInline));
+ } else if (codeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) {
+ // If inlining is disabled, force everything that isn't always_inline
+ // to carry an explicit noinline attribute.
+ if (!isAlwaysInline) {
+ f.setInlineKindAttr(
+ cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline));
+ }
+ } else {
+ // Otherwise, propagate the inline hint attribute and potentially use its
+ // absence to mark things as noinline.
+ // Search function and template pattern redeclarations for inline.
+ if (auto *fd = dyn_cast<FunctionDecl>(decl)) {
+ // TODO: Share this checkForInline implementation with classic codegen.
+ // This logic is likely to change over time, so sharing would help ensure
+ // consistency.
+ auto checkForInline = [](const FunctionDecl *decl) {
+ auto checkRedeclForInline = [](const FunctionDecl *redecl) {
+ return redecl->isInlineSpecified();
+ };
+ if (any_of(decl->redecls(), checkRedeclForInline))
+ return true;
+ const FunctionDecl *pattern = decl->getTemplateInstantiationPattern();
+ if (!pattern)
+ return false;
+ return any_of(pattern->redecls(), checkRedeclForInline);
+ };
+ if (checkForInline(fd)) {
+ f.setInlineKindAttr(cir::InlineAttr::get(&getMLIRContext(),
+ cir::InlineKind::InlineHint));
+ } else if (codeGenOpts.getInlining() ==
+ CodeGenOptions::OnlyHintInlining &&
+ !fd->isInlined() && !isAlwaysInline) {
+ f.setInlineKindAttr(
+ cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline));
+ }
+ }
+ }
+
+ assert(!cir::MissingFeatures::opFuncColdHotAttr());
+}
+
cir::FuncOp CIRGenModule::getOrCreateCIRFunction(
StringRef mangledName, mlir::Type funcType, GlobalDecl gd, bool forVTable,
bool dontDefer, bool isThunk, ForDefinition_t isForDefinition,
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h
index 690f0ed..1fc116d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.h
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.h
@@ -429,6 +429,10 @@ public:
void setFunctionAttributes(GlobalDecl gd, cir::FuncOp f,
bool isIncompleteFunction, bool isThunk);
+ /// Set extra attributes (inline, etc.) for a function.
+ void setCIRFunctionAttributesForDefinition(const clang::FunctionDecl *fd,
+ cir::FuncOp f);
+
void emitGlobalDefinition(clang::GlobalDecl gd,
mlir::Operation *op = nullptr);
void emitGlobalFunctionDefinition(clang::GlobalDecl gd, mlir::Operation *op);
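
setCIRFunctionAttributesForDefinition encodes a precedence order: an explicit noinline or always_inline attribute wins (and the two never override each other), -fno-inline (OnlyAlwaysInlining) forces noinline on anything not always_inline, an inline specifier on any redeclaration or on the template pattern becomes an inline hint, and in OnlyHintInlining mode functions without such a hint are marked noinline. A condensed sketch of that decision table, using a hypothetical enum and boolean inputs rather than the CIR API:

    enum class Inline { None, NoInline, Hint, Always };

    // Hypothetical condensation of the precedence implemented above; the real
    // code also respects an inline kind already present on the FuncOp.
    static Inline pickInlineKind(bool hasNoInlineAttr, bool hasAlwaysInlineAttr,
                                 bool onlyAlwaysInlining, bool onlyHintInlining,
                                 bool declaredInline) {
      if (hasNoInlineAttr && !hasAlwaysInlineAttr)
        return Inline::NoInline;
      if (hasAlwaysInlineAttr && !hasNoInlineAttr)
        return Inline::Always;
      if (onlyAlwaysInlining)            // -fno-inline: everything else noinline
        return Inline::NoInline;
      if (declaredInline)                // 'inline' on any redecl or pattern
        return Inline::Hint;
      if (onlyHintInlining)              // hint-only mode: no hint -> noinline
        return Inline::NoInline;
      return Inline::None;
    }
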
diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h
index 25b6ecb..c05142e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenValue.h
+++ b/clang/lib/CIR/CodeGen/CIRGenValue.h
@@ -307,8 +307,8 @@ class AggValueSlot {
/// This is set to true if some external code is responsible for setting up a
/// destructor for the slot. Otherwise the code which constructs it should
/// push the appropriate cleanup.
- LLVM_PREFERRED_TYPE(bool)
- LLVM_ATTRIBUTE_UNUSED unsigned destructedFlag : 1;
+ [[maybe_unused]]
+ LLVM_PREFERRED_TYPE(bool) unsigned destructedFlag : 1;
/// This is set to true if the memory in the slot is known to be zero before
/// the assignment into it. This means that zero fields don't need to be set.
@@ -326,16 +326,16 @@ class AggValueSlot {
/// over. Since it's invalid in general to memcpy a non-POD C++
/// object, it's important that this flag never be set when
/// evaluating an expression which constructs such an object.
- LLVM_PREFERRED_TYPE(bool)
- LLVM_ATTRIBUTE_UNUSED unsigned aliasedFlag : 1;
+ [[maybe_unused]]
+ LLVM_PREFERRED_TYPE(bool) unsigned aliasedFlag : 1;
/// This is set to true if the tail padding of this slot might overlap
/// another object that may have already been initialized (and whose
/// value must be preserved by this initialization). If so, we may only
/// store up to the dsize of the type. Otherwise we can widen stores to
/// the size of the type.
- LLVM_PREFERRED_TYPE(bool)
- LLVM_ATTRIBUTE_UNUSED unsigned overlapFlag : 1;
+ [[maybe_unused]]
+ LLVM_PREFERRED_TYPE(bool) unsigned overlapFlag : 1;
public:
enum IsDestructed_t { IsNotDestructed, IsDestructed };
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index 0712de2..b4c3704 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -1758,6 +1758,36 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) {
}).failed())
return failure();
+ // Parse optional inline kind: inline(never|always|hint)
+ if (parser.parseOptionalKeyword("inline").succeeded()) {
+ if (parser.parseLParen().failed())
+ return failure();
+
+ llvm::StringRef inlineKindStr;
+ const std::array<llvm::StringRef, cir::getMaxEnumValForInlineKind()>
+ allowedInlineKindStrs{
+ cir::stringifyInlineKind(cir::InlineKind::NoInline),
+ cir::stringifyInlineKind(cir::InlineKind::AlwaysInline),
+ cir::stringifyInlineKind(cir::InlineKind::InlineHint),
+ };
+ if (parser.parseOptionalKeyword(&inlineKindStr, allowedInlineKindStrs)
+ .failed())
+ return parser.emitError(parser.getCurrentLocation(),
+ "expected 'never', 'always', or 'hint'");
+
+ std::optional<InlineKind> inlineKind =
+ cir::symbolizeInlineKind(inlineKindStr);
+ if (!inlineKind)
+ return parser.emitError(parser.getCurrentLocation(),
+ "invalid inline kind");
+
+ state.addAttribute(getInlineKindAttrName(state.name),
+ cir::InlineAttr::get(builder.getContext(), *inlineKind));
+
+ if (parser.parseRParen().failed())
+ return failure();
+ }
+
// Parse the optional function body.
auto *body = state.addRegion();
OptionalParseResult parseResult = parser.parseOptionalRegion(
@@ -1851,6 +1881,10 @@ void cir::FuncOp::print(OpAsmPrinter &p) {
p << "(" << globalDtorPriority.value() << ")";
}
+ if (cir::InlineAttr inlineAttr = getInlineKindAttr()) {
+ p << " inline(" << cir::stringifyInlineKind(inlineAttr.getValue()) << ")";
+ }
+
// Print the body if this is not an external function.
Region &body = getOperation()->getRegion(0);
if (!body.empty()) {
@@ -2977,8 +3011,11 @@ static mlir::ParseResult parseTryHandlerRegions(
return failure();
}
- if (!currRegion.empty() && !(currRegion.back().mightHaveTerminator() &&
- currRegion.back().getTerminator()))
+ if (currRegion.empty())
+ return parser.emitError(regionLoc, "handler region shall not be empty");
+
+ if (!(currRegion.back().mightHaveTerminator() &&
+ currRegion.back().getTerminator()))
return parser.emitError(
regionLoc, "blocks are expected to be explicitly terminated");
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 1fc98ec..0243bf1 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -1587,6 +1587,7 @@ void CIRToLLVMFuncOpLowering::lowerFuncAttributes(
attr.getName() == getLinkageAttrNameString() ||
attr.getName() == func.getGlobalVisibilityAttrName() ||
attr.getName() == func.getDsoLocalAttrName() ||
+ attr.getName() == func.getInlineKindAttrName() ||
(filterArgAndResAttrs &&
(attr.getName() == func.getArgAttrsAttrName() ||
attr.getName() == func.getResAttrsAttrName())))
@@ -1671,6 +1672,12 @@ mlir::LogicalResult CIRToLLVMFuncOpLowering::matchAndRewrite(
assert(!cir::MissingFeatures::opFuncMultipleReturnVals());
+ if (auto inlineKind = op.getInlineKind()) {
+ fn.setNoInline(inlineKind == cir::InlineKind::NoInline);
+ fn.setInlineHint(inlineKind == cir::InlineKind::InlineHint);
+ fn.setAlwaysInline(inlineKind == cir::InlineKind::AlwaysInline);
+ }
+
fn.setVisibility_Attr(mlir::LLVM::VisibilityAttr::get(
getContext(), lowerCIRVisibilityToLLVMVisibility(
op.getGlobalVisibilityAttr().getValue())));
diff --git a/clang/lib/CodeGen/CGHLSLBuiltins.cpp b/clang/lib/CodeGen/CGHLSLBuiltins.cpp
index 4f2f5a76..384bd59 100644
--- a/clang/lib/CodeGen/CGHLSLBuiltins.cpp
+++ b/clang/lib/CodeGen/CGHLSLBuiltins.cpp
@@ -160,6 +160,16 @@ static Value *handleHlslSplitdouble(const CallExpr *E, CodeGenFunction *CGF) {
return LastInst;
}
+static Value *emitBufferStride(CodeGenFunction *CGF, const Expr *HandleExpr,
+ LValue &Stride) {
+ // Figure out the stride of the buffer elements from the handle type.
+ auto *HandleTy =
+ cast<HLSLAttributedResourceType>(HandleExpr->getType().getTypePtr());
+ QualType ElementTy = HandleTy->getContainedType();
+ Value *StrideValue = CGF->getTypeSize(ElementTy);
+ return CGF->Builder.CreateStore(StrideValue, Stride.getAddress());
+}
+
// Return dot product intrinsic that corresponds to the QT scalar type
static Intrinsic::ID getDotProductIntrinsic(CGHLSLRuntime &RT, QualType QT) {
if (QT->isFloatingType())
@@ -372,6 +382,19 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
RetTy, CGM.getHLSLRuntime().getNonUniformResourceIndexIntrinsic(),
ArrayRef<Value *>{IndexOp});
}
+ case Builtin::BI__builtin_hlsl_resource_getdimensions_x: {
+ Value *Handle = EmitScalarExpr(E->getArg(0));
+ LValue Dim = EmitLValue(E->getArg(1));
+ llvm::Type *RetTy = llvm::Type::getInt32Ty(getLLVMContext());
+ Value *DimValue = Builder.CreateIntrinsic(
+ RetTy, CGM.getHLSLRuntime().getGetDimensionsXIntrinsic(),
+ ArrayRef<Value *>{Handle});
+ return Builder.CreateStore(DimValue, Dim.getAddress());
+ }
+ case Builtin::BI__builtin_hlsl_resource_getstride: {
+ LValue Stride = EmitLValue(E->getArg(1));
+ return emitBufferStride(this, E->getArg(0), Stride);
+ }
case Builtin::BI__builtin_hlsl_all: {
Value *Op0 = EmitScalarExpr(E->getArg(0));
return Builder.CreateIntrinsic(
diff --git a/clang/lib/CodeGen/CGHLSLRuntime.h b/clang/lib/CodeGen/CGHLSLRuntime.h
index 7c6c285..103b4a9 100644
--- a/clang/lib/CodeGen/CGHLSLRuntime.h
+++ b/clang/lib/CodeGen/CGHLSLRuntime.h
@@ -135,6 +135,7 @@ public:
GENERATE_HLSL_INTRINSIC_FUNCTION(BufferUpdateCounter, resource_updatecounter)
GENERATE_HLSL_INTRINSIC_FUNCTION(GroupMemoryBarrierWithGroupSync,
group_memory_barrier_with_group_sync)
+ GENERATE_HLSL_INTRINSIC_FUNCTION(GetDimensionsX, resource_getdimensions_x)
//===----------------------------------------------------------------------===//
// End of reserved area for HLSL intrinsic getters.
diff --git a/clang/lib/CodeGen/CodeGenTBAA.cpp b/clang/lib/CodeGen/CodeGenTBAA.cpp
index 4e29d8a..cd08f3e 100644
--- a/clang/lib/CodeGen/CodeGenTBAA.cpp
+++ b/clang/lib/CodeGen/CodeGenTBAA.cpp
@@ -609,8 +609,7 @@ llvm::MDNode *CodeGenTBAA::getValidBaseTypeInfo(QualType QTy) {
// First calculate the metadata, before recomputing the insertion point, as
// the helper can recursively call us.
llvm::MDNode *TypeNode = getBaseTypeInfoHelper(Ty);
- LLVM_ATTRIBUTE_UNUSED auto inserted =
- BaseTypeMetadataCache.insert({Ty, TypeNode});
+ [[maybe_unused]] auto inserted = BaseTypeMetadataCache.insert({Ty, TypeNode});
assert(inserted.second && "BaseType metadata was already inserted");
return TypeNode;
diff --git a/clang/lib/CodeGen/Targets/AMDGPU.cpp b/clang/lib/CodeGen/Targets/AMDGPU.cpp
index 0fcbf7e..16d5919 100644
--- a/clang/lib/CodeGen/Targets/AMDGPU.cpp
+++ b/clang/lib/CodeGen/Targets/AMDGPU.cpp
@@ -402,6 +402,26 @@ void AMDGPUTargetCodeGenInfo::setFunctionDeclAttributes(
F->addFnAttr("amdgpu-max-num-workgroups", AttrVal.str());
}
+
+ if (auto *Attr = FD->getAttr<CUDAClusterDimsAttr>()) {
+ auto GetExprVal = [&](const auto &E) {
+ return E ? E->EvaluateKnownConstInt(M.getContext()).getExtValue() : 1;
+ };
+ unsigned X = GetExprVal(Attr->getX());
+ unsigned Y = GetExprVal(Attr->getY());
+ unsigned Z = GetExprVal(Attr->getZ());
+ llvm::SmallString<32> AttrVal;
+ llvm::raw_svector_ostream OS(AttrVal);
+ OS << X << ',' << Y << ',' << Z;
+ F->addFnAttr("amdgpu-cluster-dims", AttrVal.str());
+ }
+
+  // OpenCL doesn't support the cluster feature.

+ const TargetInfo &TTI = M.getContext().getTargetInfo();
+ if ((IsOpenCLKernel &&
+ TTI.hasFeatureEnabled(TTI.getTargetOpts().FeatureMap, "clusters")) ||
+ FD->hasAttr<CUDANoClusterAttr>())
+ F->addFnAttr("amdgpu-cluster-dims", "0,0,0");
}
void AMDGPUTargetCodeGenInfo::setTargetAttributes(
diff --git a/clang/lib/Driver/ToolChains/HLSL.cpp b/clang/lib/Driver/ToolChains/HLSL.cpp
index 5c8891f..20a320e 100644
--- a/clang/lib/Driver/ToolChains/HLSL.cpp
+++ b/clang/lib/Driver/ToolChains/HLSL.cpp
@@ -191,23 +191,35 @@ void getSpirvExtOperand(StringRef SpvExtensionArg, raw_ostream &out) {
// The extensions that are commented out are supported in DXC, but the SPIR-V
// backend does not know about them yet.
static const std::vector<StringRef> DxcSupportedExtensions = {
- "SPV_KHR_16bit_storage", "SPV_KHR_device_group",
- "SPV_KHR_fragment_shading_rate", "SPV_KHR_multiview",
- "SPV_KHR_post_depth_coverage", "SPV_KHR_non_semantic_info",
- "SPV_KHR_shader_draw_parameters", "SPV_KHR_ray_tracing",
- "SPV_KHR_shader_clock", "SPV_EXT_demote_to_helper_invocation",
- "SPV_EXT_descriptor_indexing", "SPV_EXT_fragment_fully_covered",
+ "SPV_KHR_16bit_storage",
+ "SPV_KHR_device_group",
+ "SPV_KHR_fragment_shading_rate",
+ "SPV_KHR_multiview",
+ "SPV_KHR_post_depth_coverage",
+ "SPV_KHR_non_semantic_info",
+ "SPV_KHR_shader_draw_parameters",
+ "SPV_KHR_ray_tracing",
+ "SPV_KHR_shader_clock",
+ "SPV_EXT_demote_to_helper_invocation",
+ "SPV_EXT_descriptor_indexing",
+ "SPV_EXT_fragment_fully_covered",
"SPV_EXT_fragment_invocation_density",
- "SPV_EXT_fragment_shader_interlock", "SPV_EXT_mesh_shader",
- "SPV_EXT_shader_stencil_export", "SPV_EXT_shader_viewport_index_layer",
+ "SPV_EXT_fragment_shader_interlock",
+ "SPV_EXT_mesh_shader",
+ "SPV_EXT_shader_stencil_export",
+ "SPV_EXT_shader_viewport_index_layer",
// "SPV_AMD_shader_early_and_late_fragment_tests",
- "SPV_GOOGLE_hlsl_functionality1", "SPV_GOOGLE_user_type",
- "SPV_KHR_ray_query", "SPV_EXT_shader_image_int64",
- "SPV_KHR_fragment_shader_barycentric", "SPV_KHR_physical_storage_buffer",
+ "SPV_GOOGLE_hlsl_functionality1",
+ "SPV_GOOGLE_user_type",
+ "SPV_KHR_ray_query",
+ "SPV_EXT_shader_image_int64",
+ "SPV_KHR_fragment_shader_barycentric",
+ "SPV_KHR_physical_storage_buffer",
"SPV_KHR_vulkan_memory_model",
// "SPV_KHR_compute_shader_derivatives",
- // "SPV_KHR_maximal_reconvergence",
- "SPV_KHR_float_controls", "SPV_NV_shader_subgroup_partitioned",
+ "SPV_KHR_maximal_reconvergence",
+ "SPV_KHR_float_controls",
+ "SPV_NV_shader_subgroup_partitioned",
// "SPV_KHR_quad_control"
};
diff --git a/clang/lib/Format/BreakableToken.cpp b/clang/lib/Format/BreakableToken.cpp
index 29db200..994a427 100644
--- a/clang/lib/Format/BreakableToken.cpp
+++ b/clang/lib/Format/BreakableToken.cpp
@@ -306,8 +306,10 @@ BreakableStringLiteralUsingOperators::BreakableStringLiteralUsingOperators(
// In Verilog, all strings are quoted by double quotes, joined by commas,
// and wrapped in braces. The comma is always before the newline.
assert(QuoteStyle == DoubleQuotes);
- LeftBraceQuote = Style.Cpp11BracedListStyle ? "{\"" : "{ \"";
- RightBraceQuote = Style.Cpp11BracedListStyle ? "\"}" : "\" }";
+ LeftBraceQuote =
+ Style.Cpp11BracedListStyle != FormatStyle::BLS_Block ? "{\"" : "{ \"";
+ RightBraceQuote =
+ Style.Cpp11BracedListStyle != FormatStyle::BLS_Block ? "\"}" : "\" }";
Postfix = "\",";
Prefix = "\"";
} else {
diff --git a/clang/lib/Format/ContinuationIndenter.cpp b/clang/lib/Format/ContinuationIndenter.cpp
index cd4c1aa..26a9542 100644
--- a/clang/lib/Format/ContinuationIndenter.cpp
+++ b/clang/lib/Format/ContinuationIndenter.cpp
@@ -411,7 +411,7 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
}
if (CurrentState.BreakBeforeClosingBrace &&
(Current.closesBlockOrBlockTypeList(Style) ||
- (Current.is(tok::r_brace) &&
+ (Current.is(tok::r_brace) && Current.MatchingParen &&
Current.isBlockIndentedInitRBrace(Style)))) {
return true;
}
@@ -833,7 +833,7 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
auto IsOpeningBracket = [&](const FormatToken &Tok) {
auto IsStartOfBracedList = [&]() {
return Tok.is(tok::l_brace) && Tok.isNot(BK_Block) &&
- Style.Cpp11BracedListStyle;
+ Style.Cpp11BracedListStyle != FormatStyle::BLS_Block;
};
if (Tok.isNoneOf(tok::l_paren, TT_TemplateOpener, tok::l_square) &&
!IsStartOfBracedList()) {
@@ -925,7 +925,12 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
TT_TableGenDAGArgOpenerToBreak) &&
!(Current.MacroParent && Previous.MacroParent) &&
(Current.isNot(TT_LineComment) ||
- Previous.isOneOf(BK_BracedInit, TT_VerilogMultiLineListLParen)) &&
+ (Previous.is(BK_BracedInit) &&
+ (Style.Cpp11BracedListStyle != FormatStyle::BLS_FunctionCall ||
+ !Previous.Previous ||
+ Previous.Previous->isNoneOf(tok::identifier, tok::l_paren,
+ BK_BracedInit))) ||
+ Previous.is(TT_VerilogMultiLineListLParen)) &&
!IsInTemplateString(Current)) {
CurrentState.Indent = State.Column + Spaces;
CurrentState.IsAligned = true;
diff --git a/clang/lib/Format/Format.cpp b/clang/lib/Format/Format.cpp
index 686e541..edd126c 100644
--- a/clang/lib/Format/Format.cpp
+++ b/clang/lib/Format/Format.cpp
@@ -304,6 +304,18 @@ struct ScalarEnumerationTraits<FormatStyle::BreakTemplateDeclarationsStyle> {
}
};
+template <> struct ScalarEnumerationTraits<FormatStyle::BracedListStyle> {
+ static void enumeration(IO &IO, FormatStyle::BracedListStyle &Value) {
+ IO.enumCase(Value, "Block", FormatStyle::BLS_Block);
+ IO.enumCase(Value, "FunctionCall", FormatStyle::BLS_FunctionCall);
+ IO.enumCase(Value, "AlignFirstComment", FormatStyle::BLS_AlignFirstComment);
+
+ // For backward compatibility.
+ IO.enumCase(Value, "false", FormatStyle::BLS_Block);
+ IO.enumCase(Value, "true", FormatStyle::BLS_AlignFirstComment);
+ }
+};
+
template <> struct ScalarEnumerationTraits<FormatStyle::DAGArgStyle> {
static void enumeration(IO &IO, FormatStyle::DAGArgStyle &Value) {
IO.enumCase(Value, "DontBreak", FormatStyle::DAS_DontBreak);
@@ -1628,7 +1640,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.CompactNamespaces = false;
LLVMStyle.ConstructorInitializerIndentWidth = 4;
LLVMStyle.ContinuationIndentWidth = 4;
- LLVMStyle.Cpp11BracedListStyle = true;
+ LLVMStyle.Cpp11BracedListStyle = FormatStyle::BLS_AlignFirstComment;
LLVMStyle.DerivePointerAlignment = false;
LLVMStyle.DisableFormat = false;
LLVMStyle.EmptyLineAfterAccessModifier = FormatStyle::ELAAMS_Never;
@@ -1904,7 +1916,7 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
// beneficial there. Investigate turning this on once proper string reflow
// has been implemented.
GoogleStyle.BreakStringLiterals = false;
- GoogleStyle.Cpp11BracedListStyle = false;
+ GoogleStyle.Cpp11BracedListStyle = FormatStyle::BLS_Block;
GoogleStyle.SpacesInContainerLiterals = false;
} else if (Language == FormatStyle::LK_ObjC) {
GoogleStyle.AlwaysBreakBeforeMultilineStrings = false;
@@ -2000,7 +2012,7 @@ FormatStyle getMozillaStyle() {
MozillaStyle.BreakTemplateDeclarations = FormatStyle::BTDS_Yes;
MozillaStyle.ConstructorInitializerIndentWidth = 2;
MozillaStyle.ContinuationIndentWidth = 2;
- MozillaStyle.Cpp11BracedListStyle = false;
+ MozillaStyle.Cpp11BracedListStyle = FormatStyle::BLS_Block;
MozillaStyle.FixNamespaceComments = false;
MozillaStyle.IndentCaseLabels = true;
MozillaStyle.ObjCSpaceAfterProperty = true;
@@ -2023,7 +2035,7 @@ FormatStyle getWebKitStyle() {
Style.BreakBeforeBraces = FormatStyle::BS_WebKit;
Style.BreakConstructorInitializers = FormatStyle::BCIS_BeforeComma;
Style.ColumnLimit = 0;
- Style.Cpp11BracedListStyle = false;
+ Style.Cpp11BracedListStyle = FormatStyle::BLS_Block;
Style.FixNamespaceComments = false;
Style.IndentWidth = 4;
Style.NamespaceIndentation = FormatStyle::NI_Inner;
@@ -2043,7 +2055,7 @@ FormatStyle getGNUStyle() {
Style.BreakBeforeBraces = FormatStyle::BS_GNU;
Style.BreakBeforeTernaryOperators = true;
Style.ColumnLimit = 79;
- Style.Cpp11BracedListStyle = false;
+ Style.Cpp11BracedListStyle = FormatStyle::BLS_Block;
Style.FixNamespaceComments = false;
Style.KeepFormFeed = true;
Style.SpaceBeforeParens = FormatStyle::SBPO_Always;
@@ -2184,8 +2196,9 @@ std::error_code parseConfiguration(llvm::MemoryBufferRef Config,
Input >> Styles;
if (Input.error())
return Input.error();
+ if (Styles.empty())
+ return make_error_code(ParseError::Success);
- assert(!Styles.empty());
const auto StyleCount = Styles.size();
// Start from the second style as (only) the first one may be the default.
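
Cpp11BracedListStyle is no longer a bool: the YAML values true/false still parse and are mapped to AlignFirstComment/Block for backward compatibility, and the new FunctionCall value sits between them. Roughly, Block keeps the padded, block-like braces, while the other two format braced lists like parenthesized lists and differ in how continuation lines of call-like braced inits are indented. A hedged illustration of the spacing difference on C++ input (output shapes are approximate):

    #include <vector>
    // Cpp11BracedListStyle: Block (old "false") -> padded, block-like braces.
    std::vector<int> a{ 1, 2, 3 };
    // AlignFirstComment (old "true") and FunctionCall -> tight, call-like braces.
    std::vector<int> b{1, 2, 3};
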
diff --git a/clang/lib/Format/FormatToken.cpp b/clang/lib/Format/FormatToken.cpp
index cb3fc1c..d1c6264 100644
--- a/clang/lib/Format/FormatToken.cpp
+++ b/clang/lib/Format/FormatToken.cpp
@@ -65,12 +65,13 @@ bool FormatToken::isTypeOrIdentifier(const LangOptions &LangOpts) const {
bool FormatToken::isBlockIndentedInitRBrace(const FormatStyle &Style) const {
assert(is(tok::r_brace));
- if (!Style.Cpp11BracedListStyle ||
+ assert(MatchingParen);
+ assert(MatchingParen->is(tok::l_brace));
+ if (Style.Cpp11BracedListStyle == FormatStyle::BLS_Block ||
Style.AlignAfterOpenBracket != FormatStyle::BAS_BlockIndent) {
return false;
}
const auto *LBrace = MatchingParen;
- assert(LBrace && LBrace->is(tok::l_brace));
if (LBrace->is(BK_BracedInit))
return true;
if (LBrace->Previous && LBrace->Previous->is(tok::equal))
@@ -87,7 +88,8 @@ bool FormatToken::opensBlockOrBlockTypeList(const FormatStyle &Style) const {
return is(TT_ArrayInitializerLSquare) || is(TT_ProtoExtensionLSquare) ||
(is(tok::l_brace) &&
(getBlockKind() == BK_Block || is(TT_DictLiteral) ||
- (!Style.Cpp11BracedListStyle && NestingLevel == 0))) ||
+ (Style.Cpp11BracedListStyle == FormatStyle::BLS_Block &&
+ NestingLevel == 0))) ||
(is(tok::less) && Style.isProto());
}
@@ -183,7 +185,8 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
// In C++11 braced list style, we should not format in columns unless they
// have many items (20 or more) or we allow bin-packing of function call
// arguments.
- if (Style.Cpp11BracedListStyle && !Style.BinPackArguments &&
+ if (Style.Cpp11BracedListStyle != FormatStyle::BLS_Block &&
+ !Style.BinPackArguments &&
(Commas.size() < 19 || !Style.BinPackLongBracedList)) {
return;
}
@@ -227,7 +230,7 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
ItemEnd = Token->MatchingParen;
const FormatToken *NonCommentEnd = ItemEnd->getPreviousNonComment();
ItemLengths.push_back(CodePointsBetween(ItemBegin, NonCommentEnd));
- if (Style.Cpp11BracedListStyle &&
+ if (Style.Cpp11BracedListStyle != FormatStyle::BLS_Block &&
!ItemEnd->Previous->isTrailingComment()) {
// In Cpp11 braced list style, the } and possibly other subsequent
// tokens will need to stay on a line with the last element.
diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp
index 5b784ed..778d2ca 100644
--- a/clang/lib/Format/TokenAnnotator.cpp
+++ b/clang/lib/Format/TokenAnnotator.cpp
@@ -3794,18 +3794,12 @@ static bool isFunctionDeclarationName(const LangOptions &LangOpts,
if (Current.is(TT_FunctionDeclarationName))
return true;
- if (!Current.Tok.getIdentifierInfo())
+ if (Current.isNoneOf(tok::identifier, tok::kw_operator))
return false;
const auto *Prev = Current.getPreviousNonComment();
assert(Prev);
- if (Prev->is(tok::coloncolon))
- Prev = Prev->Previous;
-
- if (!Prev)
- return false;
-
const auto &Previous = *Prev;
if (const auto *PrevPrev = Previous.getPreviousNonComment();
@@ -3854,6 +3848,8 @@ static bool isFunctionDeclarationName(const LangOptions &LangOpts,
// Find parentheses of parameter list.
if (Current.is(tok::kw_operator)) {
+ if (Line.startsWith(tok::kw_friend))
+ return true;
if (Previous.Tok.getIdentifierInfo() &&
Previous.isNoneOf(tok::kw_return, tok::kw_co_return)) {
return true;
@@ -4098,7 +4094,8 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) const {
if (Current->is(TT_LineComment)) {
if (Prev->is(BK_BracedInit) && Prev->opensScope()) {
Current->SpacesRequiredBefore =
- (Style.Cpp11BracedListStyle && !Style.SpacesInParensOptions.Other)
+ (Style.Cpp11BracedListStyle == FormatStyle::BLS_AlignFirstComment &&
+ !Style.SpacesInParensOptions.Other)
? 0
: 1;
} else if (Prev->is(TT_VerilogMultiLineListLParen)) {
@@ -4449,8 +4446,10 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
(Left.ParameterCount <= 1 || Style.AllowAllArgumentsOnNextLine)) {
return 0;
}
- if (Left.is(tok::l_brace) && !Style.Cpp11BracedListStyle)
+ if (Left.is(tok::l_brace) &&
+ Style.Cpp11BracedListStyle == FormatStyle::BLS_Block) {
return 19;
+ }
return Left.ParameterCount > 1 ? Style.PenaltyBreakBeforeFirstCallParameter
: 19;
}
@@ -4616,7 +4615,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
// Format empty list as `<>`.
if (Left.is(tok::less) && Right.is(tok::greater))
return false;
- return !Style.Cpp11BracedListStyle;
+ return Style.Cpp11BracedListStyle == FormatStyle::BLS_Block;
}
// Don't attempt to format operator<(), as it is handled later.
if (Right.isNot(TT_OverloadedOperatorLParen))
@@ -4784,7 +4783,8 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
const auto SpaceRequiredForArrayInitializerLSquare =
[](const FormatToken &LSquareTok, const FormatStyle &Style) {
return Style.SpacesInContainerLiterals ||
- (Style.isProto() && !Style.Cpp11BracedListStyle &&
+ (Style.isProto() &&
+ Style.Cpp11BracedListStyle == FormatStyle::BLS_Block &&
LSquareTok.endsSequence(tok::l_square, tok::colon,
TT_SelectorName));
};
@@ -4817,7 +4817,8 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if ((Left.is(tok::l_brace) && Left.isNot(BK_Block)) ||
(Right.is(tok::r_brace) && Right.MatchingParen &&
Right.MatchingParen->isNot(BK_Block))) {
- return !Style.Cpp11BracedListStyle || Style.SpacesInParensOptions.Other;
+ return Style.Cpp11BracedListStyle == FormatStyle::BLS_Block ||
+ Style.SpacesInParensOptions.Other;
}
if (Left.is(TT_BlockComment)) {
// No whitespace in x(/*foo=*/1), except for JavaScript.
@@ -4999,7 +5000,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
Left.Children.empty()) {
if (Left.is(BK_Block))
return Style.SpaceInEmptyBraces != FormatStyle::SIEB_Never;
- if (Style.Cpp11BracedListStyle) {
+ if (Style.Cpp11BracedListStyle != FormatStyle::BLS_Block) {
return Style.SpacesInParens == FormatStyle::SIPO_Custom &&
Style.SpacesInParensOptions.InEmptyParentheses;
}
@@ -5081,7 +5082,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Left.MatchingParen &&
Left.MatchingParen->is(TT_ProtoExtensionLSquare) &&
Right.isOneOf(tok::l_brace, tok::less)) {
- return !Style.Cpp11BracedListStyle;
+ return Style.Cpp11BracedListStyle == FormatStyle::BLS_Block;
}
// A percent is probably part of a formatting specification, such as %lld.
if (Left.is(tok::percent))
@@ -5521,7 +5522,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Left.is(tok::greater) && Right.is(tok::greater)) {
if (Style.isTextProto() ||
(Style.Language == FormatStyle::LK_Proto && Left.is(TT_DictLiteral))) {
- return !Style.Cpp11BracedListStyle;
+ return Style.Cpp11BracedListStyle == FormatStyle::BLS_Block;
}
return Right.is(TT_TemplateCloser) && Left.is(TT_TemplateCloser) &&
((Style.Standard < FormatStyle::LS_Cpp11) ||
@@ -6382,7 +6383,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
return false;
}
if (Left.is(tok::equal) && Right.is(tok::l_brace) &&
- !Style.Cpp11BracedListStyle) {
+ Style.Cpp11BracedListStyle == FormatStyle::BLS_Block) {
return false;
}
if (Left.is(TT_AttributeLParen) ||
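
Among the TokenAnnotator changes, isFunctionDeclarationName now accepts an operator introduced on a friend line as a declaration name. A small C++20 snippet with hidden-friend operators of the kind this logic is concerned with (illustrative; the precise cases fixed live in the formatter tests, which are not shown here):

    #include <compare>
    struct Point {
      int x, y;
      // 'operator' following 'friend' is treated as a function declaration name.
      friend auto operator<=>(const Point &, const Point &) = default;
      friend bool operator==(const Point &, const Point &) = default;
    };
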
diff --git a/clang/lib/Format/UnwrappedLineParser.cpp b/clang/lib/Format/UnwrappedLineParser.cpp
index dec71191..5e2584e 100644
--- a/clang/lib/Format/UnwrappedLineParser.cpp
+++ b/clang/lib/Format/UnwrappedLineParser.cpp
@@ -60,7 +60,7 @@ void printLine(llvm::raw_ostream &OS, const UnwrappedLine &Line,
OS << "\n";
}
-LLVM_ATTRIBUTE_UNUSED static void printDebugInfo(const UnwrappedLine &Line) {
+[[maybe_unused]] static void printDebugInfo(const UnwrappedLine &Line) {
printLine(llvm::dbgs(), Line);
}
diff --git a/clang/lib/Format/WhitespaceManager.cpp b/clang/lib/Format/WhitespaceManager.cpp
index 7348a3a..9261294 100644
--- a/clang/lib/Format/WhitespaceManager.cpp
+++ b/clang/lib/Format/WhitespaceManager.cpp
@@ -1238,7 +1238,8 @@ void WhitespaceManager::alignArrayInitializersRightJustified(
if (!CellDescs.isRectangular())
return;
- const int BracePadding = Style.Cpp11BracedListStyle ? 0 : 1;
+ const int BracePadding =
+ Style.Cpp11BracedListStyle != FormatStyle::BLS_Block ? 0 : 1;
auto &Cells = CellDescs.Cells;
// Now go through and fixup the spaces.
auto *CellIter = Cells.begin();
@@ -1314,7 +1315,8 @@ void WhitespaceManager::alignArrayInitializersLeftJustified(
if (!CellDescs.isRectangular())
return;
- const int BracePadding = Style.Cpp11BracedListStyle ? 0 : 1;
+ const int BracePadding =
+ Style.Cpp11BracedListStyle != FormatStyle::BLS_Block ? 0 : 1;
auto &Cells = CellDescs.Cells;
// Now go through and fixup the spaces.
auto *CellIter = Cells.begin();
diff --git a/clang/lib/Headers/__clang_hip_runtime_wrapper.h b/clang/lib/Headers/__clang_hip_runtime_wrapper.h
index da1e39a..fb0ece9 100644
--- a/clang/lib/Headers/__clang_hip_runtime_wrapper.h
+++ b/clang/lib/Headers/__clang_hip_runtime_wrapper.h
@@ -25,6 +25,8 @@
#define __constant__ __attribute__((constant))
#define __managed__ __attribute__((managed))
+#define __cluster_dims__(...) __attribute__((cluster_dims(__VA_ARGS__)))
+
#if !defined(__cplusplus) || __cplusplus < 201103L
#define nullptr NULL;
#endif
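
The HIP runtime wrapper now exposes __cluster_dims__ as a thin wrapper over the cluster_dims attribute; on the codegen side (Targets/AMDGPU.cpp above) it materializes as an "amdgpu-cluster-dims"="x,y,z" function attribute, with unspecified dimensions defaulting to 1 and the no-cluster / OpenCL cases producing "0,0,0". A hedged HIP-flavoured sketch (compiles only with a HIP toolchain; the kernel itself is illustrative):

    // Illustrative HIP kernel requesting a 2x2x1 thread-block cluster.
    __global__ __cluster_dims__(2, 2) void clusteredKernel(float *out) {
      *out = 1.0f;
    }
    // Expected IR-level effect (assumption): the generated function carries
    //   "amdgpu-cluster-dims"="2,2,1"
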
diff --git a/clang/lib/Headers/avx2intrin.h b/clang/lib/Headers/avx2intrin.h
index fa7f4c2..d35bc0e 100644
--- a/clang/lib/Headers/avx2intrin.h
+++ b/clang/lib/Headers/avx2intrin.h
@@ -1650,9 +1650,8 @@ _mm256_mul_epi32(__m256i __a, __m256i __b) {
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the rounded products.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mulhrs_epi16(__m256i __a, __m256i __b)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_mulhrs_epi16(__m256i __a, __m256i __b) {
return (__m256i)__builtin_ia32_pmulhrsw256((__v16hi)__a, (__v16hi)__b);
}
@@ -1670,8 +1669,7 @@ _mm256_mulhrs_epi16(__m256i __a, __m256i __b)
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the products.
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_mulhi_epu16(__m256i __a, __m256i __b)
-{
+_mm256_mulhi_epu16(__m256i __a, __m256i __b) {
return (__m256i)__builtin_ia32_pmulhuw256((__v16hu)__a, (__v16hu)__b);
}
diff --git a/clang/lib/Headers/avx512bwintrin.h b/clang/lib/Headers/avx512bwintrin.h
index 23b2d29..ac75b6c 100644
--- a/clang/lib/Headers/avx512bwintrin.h
+++ b/clang/lib/Headers/avx512bwintrin.h
@@ -1003,23 +1003,20 @@ _mm512_maskz_permutex2var_epi16(__mmask32 __U, __m512i __A, __m512i __I,
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mulhrs_epi16(__m512i __A, __m512i __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mulhrs_epi16(__m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_pmulhrsw512((__v32hi)__A, (__v32hi)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mulhrs_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mask_mulhrs_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_mulhrs_epi16(__A, __B),
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mulhrs_epi16(__mmask32 __U, __m512i __A, __m512i __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_maskz_mulhrs_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_mulhrs_epi16(__A, __B),
(__v32hi)_mm512_setzero_si512());
diff --git a/clang/lib/Headers/avx512vlbwintrin.h b/clang/lib/Headers/avx512vlbwintrin.h
index 639fb60..0fcfe37 100644
--- a/clang/lib/Headers/avx512vlbwintrin.h
+++ b/clang/lib/Headers/avx512vlbwintrin.h
@@ -1510,28 +1510,28 @@ _mm256_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A)
__builtin_ia32_pmovuswb256mem_mask ((__v16qi*) __P, (__v16hi) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_mulhrs_epi16(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_mulhrs_epi16(__X, __Y),
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_mulhrs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_mulhrs_epi16(__X, __Y),
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_mulhrs_epi16(__m256i __W, __mmask16 __U, __m256i __X, __m256i __Y) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_mulhrs_epi16(__X, __Y),
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_mulhrs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_mulhrs_epi16(__X, __Y),
diff --git a/clang/lib/Headers/tmmintrin.h b/clang/lib/Headers/tmmintrin.h
index ee96caa..5d0f20f 100644
--- a/clang/lib/Headers/tmmintrin.h
+++ b/clang/lib/Headers/tmmintrin.h
@@ -544,8 +544,8 @@ _mm_maddubs_pi16(__m64 __a, __m64 __b) {
/// A 128-bit vector of [8 x i16] containing one of the source operands.
/// \returns A 128-bit vector of [8 x i16] containing the rounded and scaled
/// products of both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhrs_epi16(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_mulhrs_epi16(__m128i __a, __m128i __b) {
return (__m128i)__builtin_ia32_pmulhrsw128((__v8hi)__a, (__v8hi)__b);
}
@@ -563,11 +563,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhrs_epi16(__m128i __a,
/// A 64-bit vector of [4 x i16] containing one of the source operands.
/// \returns A 64-bit vector of [4 x i16] containing the rounded and scaled
/// products of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_mulhrs_pi16(__m64 __a, __m64 __b)
-{
- return __trunc64(__builtin_ia32_pmulhrsw128((__v8hi)__anyext128(__a),
- (__v8hi)__anyext128(__b)));
+static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_mulhrs_pi16(__m64 __a, __m64 __b) {
+ return __trunc64(__builtin_ia32_pmulhrsw128((__v8hi)__zext128(__a),
+ (__v8hi)__zext128(__b)));
}
/// Copies the 8-bit integers from a 128-bit integer vector to the
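
With the rounding-multiply intrinsics now constexpr-enabled, Q15 products can fold at compile time, matching the interpreter and ExprConstant support added earlier in this patch. A hedged sketch (assumes a clang and header set where _mm_set1_epi16 is likewise constexpr-enabled; otherwise evaluate at run time):

    #include <immintrin.h>
    constexpr __m128i HalfQ15    = _mm_set1_epi16(0x4000); // 0.5 in Q15
    constexpr __m128i QuarterQ15 = _mm_set1_epi16(0x2000); // 0.25 in Q15
    constexpr __m128i Product    = _mm_mulhrs_epi16(HalfQ15, QuarterQ15);
    // Every lane of Product holds 0x1000, i.e. 0.125 in Q15.
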
diff --git a/clang/lib/Parse/ParseTemplate.cpp b/clang/lib/Parse/ParseTemplate.cpp
index dbc7cbc..330a9c6 100644
--- a/clang/lib/Parse/ParseTemplate.cpp
+++ b/clang/lib/Parse/ParseTemplate.cpp
@@ -533,6 +533,12 @@ bool Parser::isTypeConstraintAnnotation() {
bool Parser::TryAnnotateTypeConstraint() {
if (!getLangOpts().CPlusPlus20)
return false;
+ // The type constraint may declare template parameters, notably
+ // if it contains a generic lambda, so we need to increment
+ // the template depth as these parameters would not be instantiated
+ // at the current depth.
+ TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
+ ++CurTemplateDepthTracker;
CXXScopeSpec SS;
bool WasScopeAnnotation = Tok.is(tok::annot_cxxscope);
if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
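
The depth bump matters when the type-constraint's template arguments themselves introduce template parameters, which happens as soon as a generic lambda appears inside them; without it, those invented parameters would be attributed to the current depth. A hedged C++20 sketch of the shape of code this protects (names are illustrative):

    // Illustrative only: a type-constraint whose arguments contain a generic
    // lambda, so an invented template parameter is introduced while the parser
    // is still annotating 'SameSizeAs<...>'.
    template <class T, class U>
    concept SameSizeAs = sizeof(T) == sizeof(U);

    template <SameSizeAs<decltype([](auto x) { return x; })> T>
    void takesLambdaSizedThing(T);
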
diff --git a/clang/lib/Sema/DeclSpec.cpp b/clang/lib/Sema/DeclSpec.cpp
index 184d31e..9da3d0d 100644
--- a/clang/lib/Sema/DeclSpec.cpp
+++ b/clang/lib/Sema/DeclSpec.cpp
@@ -1369,7 +1369,8 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
if (S.getLangOpts().C23 &&
getConstexprSpecifier() == ConstexprSpecKind::Constexpr &&
- StorageClassSpec == SCS_extern) {
+ getTypeSpecType() != TST_unspecified &&
+ (StorageClassSpec == SCS_extern || StorageClassSpec == SCS_auto)) {
S.Diag(ConstexprLoc, diag::err_invalid_decl_spec_combination)
<< DeclSpec::getSpecifierName(getStorageClassSpec())
<< SourceRange(getStorageClassSpecLoc());
diff --git a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp
index 40c318a..066acf6 100644
--- a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp
+++ b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp
@@ -57,6 +57,29 @@ CXXConstructorDecl *lookupCopyConstructor(QualType ResTy) {
return CD;
return nullptr;
}
+
+ParameterABI
+convertParamModifierToParamABI(HLSLParamModifierAttr::Spelling Modifier) {
+  assert(Modifier != HLSLParamModifierAttr::Spelling::Keyword_in &&
+         "HLSL 'in' parameter modifier cannot be converted to ParameterABI");
+ switch (Modifier) {
+ case HLSLParamModifierAttr::Spelling::Keyword_out:
+ return ParameterABI::HLSLOut;
+ case HLSLParamModifierAttr::Spelling::Keyword_inout:
+ return ParameterABI::HLSLInOut;
+ default:
+ llvm_unreachable("Invalid HLSL parameter modifier");
+ }
+}
+
+QualType getInoutParameterType(ASTContext &AST, QualType Ty) {
+ assert(!Ty->isReferenceType() &&
+ "Pointer and reference types cannot be inout or out parameters");
+ Ty = AST.getLValueReferenceType(Ty);
+ Ty.addRestrict();
+ return Ty;
+}
+
} // namespace
// Builder for template arguments of builtin types. Used internally
@@ -430,19 +453,36 @@ BuiltinTypeMethodBuilder::addParam(StringRef Name, QualType Ty,
void BuiltinTypeMethodBuilder::createDecl() {
assert(Method == nullptr && "Method or constructor is already created");
- // create method or constructor type
+ // create function prototype
ASTContext &AST = DeclBuilder.SemaRef.getASTContext();
SmallVector<QualType> ParamTypes;
- for (Param &MP : Params)
+ SmallVector<FunctionType::ExtParameterInfo> ParamExtInfos(Params.size());
+ uint32_t ArgIndex = 0;
+
+ // Create function prototype.
+ bool UseParamExtInfo = false;
+ for (Param &MP : Params) {
+ if (MP.Modifier != HLSLParamModifierAttr::Keyword_in) {
+ UseParamExtInfo = true;
+ FunctionType::ExtParameterInfo &PI = ParamExtInfos[ArgIndex];
+ ParamExtInfos[ArgIndex] =
+ PI.withABI(convertParamModifierToParamABI(MP.Modifier));
+ if (!MP.Ty->isDependentType())
+ MP.Ty = getInoutParameterType(AST, MP.Ty);
+ }
ParamTypes.emplace_back(MP.Ty);
+ ++ArgIndex;
+ }
FunctionProtoType::ExtProtoInfo ExtInfo;
+ if (UseParamExtInfo)
+ ExtInfo.ExtParameterInfos = ParamExtInfos.data();
if (IsConst)
ExtInfo.TypeQuals.addConst();
QualType FuncTy = AST.getFunctionType(ReturnTy, ParamTypes, ExtInfo);
- // create method or constructor decl
+ // Create method or constructor declaration.
auto *TSInfo = AST.getTrivialTypeSourceInfo(FuncTy, SourceLocation());
DeclarationNameInfo NameInfo = DeclarationNameInfo(Name, SourceLocation());
if (IsCtor)
@@ -455,7 +495,7 @@ void BuiltinTypeMethodBuilder::createDecl() {
AST, DeclBuilder.Record, SourceLocation(), NameInfo, FuncTy, TSInfo, SC,
false, false, ConstexprSpecKind::Unspecified, SourceLocation());
- // create params & set them to the function prototype
+ // Create params & set them to the method/constructor and function prototype.
SmallVector<ParmVarDecl *> ParmDecls;
unsigned CurScopeDepth = DeclBuilder.SemaRef.getCurScope()->getDepth();
auto FnProtoLoc =
@@ -1258,5 +1298,37 @@ BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addConsumeMethod() {
.finalize();
}
+BuiltinTypeDeclBuilder &
+BuiltinTypeDeclBuilder::addGetDimensionsMethodForBuffer() {
+ using PH = BuiltinTypeMethodBuilder::PlaceHolder;
+ ASTContext &AST = SemaRef.getASTContext();
+ QualType UIntTy = AST.UnsignedIntTy;
+
+ QualType HandleTy = getResourceHandleField()->getType();
+ auto *AttrResTy = cast<HLSLAttributedResourceType>(HandleTy.getTypePtr());
+
+  // Structured buffers other than {RW}ByteAddressBuffer have the overload
+ // GetDimensions(out uint numStructs, out uint stride).
+ if (AttrResTy->getAttrs().RawBuffer &&
+ AttrResTy->getContainedType() != AST.Char8Ty) {
+ return BuiltinTypeMethodBuilder(*this, "GetDimensions", AST.VoidTy)
+ .addParam("numStructs", UIntTy, HLSLParamModifierAttr::Keyword_out)
+ .addParam("stride", UIntTy, HLSLParamModifierAttr::Keyword_out)
+ .callBuiltin("__builtin_hlsl_resource_getdimensions_x", QualType(),
+ PH::Handle, PH::_0)
+ .callBuiltin("__builtin_hlsl_resource_getstride", QualType(),
+ PH::Handle, PH::_1)
+ .finalize();
+ }
+
+  // Typed buffers and {RW}ByteAddressBuffer have the overload
+ // GetDimensions(out uint dim).
+ return BuiltinTypeMethodBuilder(*this, "GetDimensions", AST.VoidTy)
+ .addParam("dim", UIntTy, HLSLParamModifierAttr::Keyword_out)
+ .callBuiltin("__builtin_hlsl_resource_getdimensions_x", QualType(),
+ PH::Handle, PH::_0)
+ .finalize();
+}
+
} // namespace hlsl
} // namespace clang
diff --git a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h
index 86cbd10..95e3a6c 100644
--- a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h
+++ b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h
@@ -94,6 +94,8 @@ public:
BuiltinTypeDeclBuilder &addAppendMethod();
BuiltinTypeDeclBuilder &addConsumeMethod();
+ BuiltinTypeDeclBuilder &addGetDimensionsMethodForBuffer();
+
private:
BuiltinTypeDeclBuilder &addCreateFromBinding();
BuiltinTypeDeclBuilder &addCreateFromImplicitBinding();
diff --git a/clang/lib/Sema/HLSLExternalSemaSource.cpp b/clang/lib/Sema/HLSLExternalSemaSource.cpp
index f28a037..6be84f1 100644
--- a/clang/lib/Sema/HLSLExternalSemaSource.cpp
+++ b/clang/lib/Sema/HLSLExternalSemaSource.cpp
@@ -380,6 +380,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
/*RawBuffer=*/false, /*HasCounter=*/false)
.addArraySubscriptOperators()
.addLoadMethods()
+ .addGetDimensionsMethodForBuffer()
.completeDefinition();
});
@@ -392,6 +393,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
/*RawBuffer=*/false, /*HasCounter=*/false)
.addArraySubscriptOperators()
.addLoadMethods()
+ .addGetDimensionsMethodForBuffer()
.completeDefinition();
});
@@ -404,6 +406,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
/*RawBuffer=*/false, /*HasCounter=*/false)
.addArraySubscriptOperators()
.addLoadMethods()
+ .addGetDimensionsMethodForBuffer()
.completeDefinition();
});
@@ -415,6 +418,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
/*RawBuffer=*/true, /*HasCounter=*/false)
.addArraySubscriptOperators()
.addLoadMethods()
+ .addGetDimensionsMethodForBuffer()
.completeDefinition();
});
@@ -428,6 +432,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
.addLoadMethods()
.addIncrementCounterMethod()
.addDecrementCounterMethod()
+ .addGetDimensionsMethodForBuffer()
.completeDefinition();
});
@@ -439,6 +444,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
setupBufferType(Decl, *SemaPtr, ResourceClass::UAV, /*IsROV=*/false,
/*RawBuffer=*/true, /*HasCounter=*/true)
.addAppendMethod()
+ .addGetDimensionsMethodForBuffer()
.completeDefinition();
});
@@ -450,6 +456,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
setupBufferType(Decl, *SemaPtr, ResourceClass::UAV, /*IsROV=*/false,
/*RawBuffer=*/true, /*HasCounter=*/true)
.addConsumeMethod()
+ .addGetDimensionsMethodForBuffer()
.completeDefinition();
});
@@ -464,6 +471,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
.addLoadMethods()
.addIncrementCounterMethod()
.addDecrementCounterMethod()
+ .addGetDimensionsMethodForBuffer()
.completeDefinition();
});
@@ -472,6 +480,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
onCompletion(Decl, [this](CXXRecordDecl *Decl) {
setupBufferType(Decl, *SemaPtr, ResourceClass::SRV, /*IsROV=*/false,
/*RawBuffer=*/true, /*HasCounter=*/false)
+ .addGetDimensionsMethodForBuffer()
.completeDefinition();
});
Decl = BuiltinTypeDeclBuilder(*SemaPtr, HLSLNamespace, "RWByteAddressBuffer")
@@ -479,6 +488,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
onCompletion(Decl, [this](CXXRecordDecl *Decl) {
setupBufferType(Decl, *SemaPtr, ResourceClass::UAV, /*IsROV=*/false,
/*RawBuffer=*/true, /*HasCounter=*/false)
+ .addGetDimensionsMethodForBuffer()
.completeDefinition();
});
Decl = BuiltinTypeDeclBuilder(*SemaPtr, HLSLNamespace,
@@ -487,6 +497,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
onCompletion(Decl, [this](CXXRecordDecl *Decl) {
setupBufferType(Decl, *SemaPtr, ResourceClass::UAV, /*IsROV=*/true,
/*RawBuffer=*/true, /*HasCounter=*/false)
+ .addGetDimensionsMethodForBuffer()
.completeDefinition();
});
}
diff --git a/clang/lib/Sema/SemaConcept.cpp b/clang/lib/Sema/SemaConcept.cpp
index 87dd682..04a73181 100644
--- a/clang/lib/Sema/SemaConcept.cpp
+++ b/clang/lib/Sema/SemaConcept.cpp
@@ -1217,13 +1217,51 @@ bool Sema::CheckConstraintSatisfaction(
return false;
}
+static const ExprResult
+SubstituteConceptsInConstrainExpression(Sema &S, const NamedDecl *D,
+ const ConceptSpecializationExpr *CSE,
+ UnsignedOrNone SubstIndex) {
+
+ // [C++2c] [temp.constr.normal]
+ // Otherwise, to form CE, any non-dependent concept template argument Ai
+ // is substituted into the constraint-expression of C.
+ // If any such substitution results in an invalid concept-id,
+ // the program is ill-formed; no diagnostic is required.
+
+ ConceptDecl *Concept = CSE->getNamedConcept()->getCanonicalDecl();
+ Sema::ArgPackSubstIndexRAII _(S, SubstIndex);
+
+ const ASTTemplateArgumentListInfo *ArgsAsWritten =
+ CSE->getTemplateArgsAsWritten();
+ if (llvm::none_of(
+ ArgsAsWritten->arguments(), [&](const TemplateArgumentLoc &ArgLoc) {
+ return !ArgLoc.getArgument().isDependent() &&
+ ArgLoc.getArgument().isConceptOrConceptTemplateParameter();
+ })) {
+ return Concept->getConstraintExpr();
+ }
+
+ MultiLevelTemplateArgumentList MLTAL = S.getTemplateInstantiationArgs(
+ Concept, Concept->getLexicalDeclContext(),
+ /*Final=*/false, CSE->getTemplateArguments(),
+ /*RelativeToPrimary=*/true,
+ /*Pattern=*/nullptr,
+ /*ForConstraintInstantiation=*/true);
+ return S.SubstConceptTemplateArguments(CSE, Concept->getConstraintExpr(),
+ MLTAL);
+}
+
bool Sema::CheckConstraintSatisfaction(
const ConceptSpecializationExpr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction) {
+ ExprResult Res = SubstituteConceptsInConstrainExpression(
+ *this, nullptr, ConstraintExpr, ArgPackSubstIndex);
+ if (!Res.isUsable())
+ return true;
+
llvm::SmallVector<AssociatedConstraint, 1> Constraints;
- Constraints.emplace_back(
- ConstraintExpr->getNamedConcept()->getConstraintExpr());
+ Constraints.emplace_back(Res.get());
MultiLevelTemplateArgumentList MLTAL(ConstraintExpr->getNamedConcept(),
ConstraintExpr->getTemplateArguments(),
@@ -2249,8 +2287,14 @@ NormalizedConstraint *NormalizedConstraint::fromConstraintExpr(
// Use canonical declarations to merge ConceptDecls across
// different modules.
ConceptDecl *CD = CSE->getNamedConcept()->getCanonicalDecl();
+
+ ExprResult Res =
+ SubstituteConceptsInConstrainExpression(S, D, CSE, SubstIndex);
+ if (!Res.isUsable())
+ return nullptr;
+
SubNF = NormalizedConstraint::fromAssociatedConstraints(
- S, CD, AssociatedConstraint(CD->getConstraintExpr(), SubstIndex));
+ S, CD, AssociatedConstraint(Res.get(), SubstIndex));
if (!SubNF)
return nullptr;
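For context, a hedged sketch of the concept-id shape this normalization path targets, assuming the C++26 concept-template-parameter syntax implied by the [temp.constr.normal] wording quoted above (all names are illustrative, not from this patch):

template <typename T>
concept Fits32 = sizeof(T) <= 4;             // an ordinary concept

template <typename T, template <typename> concept CC>
concept Wraps = CC<T>;                       // CC is a concept template parameter

template <typename U>
  requires Wraps<U, Fits32>                  // Fits32 is a non-dependent concept
void f(U);                                   // argument, so normalization substitutes
                                             // it into CC<T>, yielding Fits32<T>

Here U stays dependent and is left alone; only the non-dependent concept argument Fits32 is substituted into the constraint-expression of Wraps.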
diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
index 04d46d6..fc3aabf 100644
--- a/clang/lib/Sema/SemaDecl.cpp
+++ b/clang/lib/Sema/SemaDecl.cpp
@@ -7640,6 +7640,58 @@ static bool isMainVar(DeclarationName Name, VarDecl *VD) {
VD->isExternC());
}
+void Sema::CheckAsmLabel(Scope *S, Expr *E, StorageClass SC,
+ TypeSourceInfo *TInfo, VarDecl *NewVD) {
+
+  // Return early if the declaration does not have an asm label.
+ if (E == nullptr)
+ return;
+
+ // The parser guarantees this is a string.
+ StringLiteral *SE = cast<StringLiteral>(E);
+ StringRef Label = SE->getString();
+ QualType R = TInfo->getType();
+ if (S->getFnParent() != nullptr) {
+ switch (SC) {
+ case SC_None:
+ case SC_Auto:
+ Diag(E->getExprLoc(), diag::warn_asm_label_on_auto_decl) << Label;
+ break;
+ case SC_Register:
+ // Local Named register
+ if (!Context.getTargetInfo().isValidGCCRegisterName(Label) &&
+ DeclAttrsMatchCUDAMode(getLangOpts(), getCurFunctionDecl()))
+ Diag(E->getExprLoc(), diag::err_asm_unknown_register_name) << Label;
+ break;
+ case SC_Static:
+ case SC_Extern:
+ case SC_PrivateExtern:
+ break;
+ }
+ } else if (SC == SC_Register) {
+ // Global Named register
+ if (DeclAttrsMatchCUDAMode(getLangOpts(), NewVD)) {
+ const auto &TI = Context.getTargetInfo();
+ bool HasSizeMismatch;
+
+ if (!TI.isValidGCCRegisterName(Label))
+ Diag(E->getExprLoc(), diag::err_asm_unknown_register_name) << Label;
+ else if (!TI.validateGlobalRegisterVariable(Label, Context.getTypeSize(R),
+ HasSizeMismatch))
+ Diag(E->getExprLoc(), diag::err_asm_invalid_global_var_reg) << Label;
+ else if (HasSizeMismatch)
+ Diag(E->getExprLoc(), diag::err_asm_register_size_mismatch) << Label;
+ }
+
+ if (!R->isIntegralType(Context) && !R->isPointerType()) {
+ Diag(TInfo->getTypeLoc().getBeginLoc(),
+ diag::err_asm_unsupported_register_type)
+ << TInfo->getTypeLoc().getSourceRange();
+ NewVD->setInvalidDecl(true);
+ }
+ }
+}
+
NamedDecl *Sema::ActOnVariableDeclarator(
Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo,
LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists,
@@ -8124,6 +8176,26 @@ NamedDecl *Sema::ActOnVariableDeclarator(
}
}
+ if (Expr *E = D.getAsmLabel()) {
+ // The parser guarantees this is a string.
+ StringLiteral *SE = cast<StringLiteral>(E);
+ StringRef Label = SE->getString();
+
+ // Insert the asm attribute.
+ NewVD->addAttr(AsmLabelAttr::Create(Context, Label, SE->getStrTokenLoc(0)));
+ } else if (!ExtnameUndeclaredIdentifiers.empty()) {
+ llvm::DenseMap<IdentifierInfo *, AsmLabelAttr *>::iterator I =
+ ExtnameUndeclaredIdentifiers.find(NewVD->getIdentifier());
+ if (I != ExtnameUndeclaredIdentifiers.end()) {
+ if (isDeclExternC(NewVD)) {
+ NewVD->addAttr(I->second);
+ ExtnameUndeclaredIdentifiers.erase(I);
+ } else
+ Diag(NewVD->getLocation(), diag::warn_redefine_extname_not_applied)
+ << /*Variable*/ 1 << NewVD;
+ }
+ }
+
// Handle attributes prior to checking for duplicates in MergeVarDecl
ProcessDeclAttributes(S, NewVD, D);
@@ -8174,65 +8246,11 @@ NamedDecl *Sema::ActOnVariableDeclarator(
if (getLangOpts().ObjCAutoRefCount && ObjC().inferObjCARCLifetime(NewVD))
NewVD->setInvalidDecl();
- // Handle GNU asm-label extension (encoded as an attribute).
- if (Expr *E = D.getAsmLabel()) {
- // The parser guarantees this is a string.
- StringLiteral *SE = cast<StringLiteral>(E);
- StringRef Label = SE->getString();
- if (S->getFnParent() != nullptr) {
- switch (SC) {
- case SC_None:
- case SC_Auto:
- Diag(E->getExprLoc(), diag::warn_asm_label_on_auto_decl) << Label;
- break;
- case SC_Register:
- // Local Named register
- if (!Context.getTargetInfo().isValidGCCRegisterName(Label) &&
- DeclAttrsMatchCUDAMode(getLangOpts(), getCurFunctionDecl()))
- Diag(E->getExprLoc(), diag::err_asm_unknown_register_name) << Label;
- break;
- case SC_Static:
- case SC_Extern:
- case SC_PrivateExtern:
- break;
- }
- } else if (SC == SC_Register) {
- // Global Named register
- if (DeclAttrsMatchCUDAMode(getLangOpts(), NewVD)) {
- const auto &TI = Context.getTargetInfo();
- bool HasSizeMismatch;
-
- if (!TI.isValidGCCRegisterName(Label))
- Diag(E->getExprLoc(), diag::err_asm_unknown_register_name) << Label;
- else if (!TI.validateGlobalRegisterVariable(Label,
- Context.getTypeSize(R),
- HasSizeMismatch))
- Diag(E->getExprLoc(), diag::err_asm_invalid_global_var_reg) << Label;
- else if (HasSizeMismatch)
- Diag(E->getExprLoc(), diag::err_asm_register_size_mismatch) << Label;
- }
-
- if (!R->isIntegralType(Context) && !R->isPointerType()) {
- Diag(TInfo->getTypeLoc().getBeginLoc(),
- diag::err_asm_unsupported_register_type)
- << TInfo->getTypeLoc().getSourceRange();
- NewVD->setInvalidDecl(true);
- }
- }
-
- NewVD->addAttr(AsmLabelAttr::Create(Context, Label, SE->getStrTokenLoc(0)));
- } else if (!ExtnameUndeclaredIdentifiers.empty()) {
- llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*>::iterator I =
- ExtnameUndeclaredIdentifiers.find(NewVD->getIdentifier());
- if (I != ExtnameUndeclaredIdentifiers.end()) {
- if (isDeclExternC(NewVD)) {
- NewVD->addAttr(I->second);
- ExtnameUndeclaredIdentifiers.erase(I);
- } else
- Diag(NewVD->getLocation(), diag::warn_redefine_extname_not_applied)
- << /*Variable*/1 << NewVD;
- }
- }
+  // Check the asm label here, after all other attributes of the Decl are
+  // known. Otherwise, in a CUDA context, we cannot tell whether the asm
+  // label refers to the host or the device: the device has a different
+  // register set than the host, so we must know where the variable lives.
+ CheckAsmLabel(S, D.getAsmLabel(), SC, TInfo, NewVD);
// Find the shadowed declaration before filtering for scope.
NamedDecl *ShadowedDecl = D.getCXXScopeSpec().isEmpty()
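For reference, a minimal sketch of the GNU asm-label declarations that the extracted CheckAsmLabel path validates (identifiers are illustrative; the register-variable cases additionally require a valid target register name and an integral or pointer type):

extern int counter asm("counter_v2");  // asm label: the symbol is emitted as "counter_v2"

void g() {
  int local asm("ignored_label");      // block scope, no storage class:
                                       // warns that the asm label is ignored
}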
diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp
index e6f8748..9475b8a 100644
--- a/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/clang/lib/Sema/SemaDeclAttr.cpp
@@ -5676,6 +5676,114 @@ static void handleLaunchBoundsAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
AL.getNumArgs() > 2 ? AL.getArgAsExpr(2) : nullptr);
}
+static std::pair<Expr *, int>
+makeClusterDimsArgExpr(Sema &S, Expr *E, const CUDAClusterDimsAttr &AL,
+ const unsigned Idx) {
+ if (!E || S.DiagnoseUnexpandedParameterPack(E))
+ return {};
+
+  // Accept dependent arguments for now; they will be checked when the
+  // enclosing template is eventually instantiated.
+ if (E->isInstantiationDependent())
+ return {E, 1};
+
+ std::optional<llvm::APSInt> I = E->getIntegerConstantExpr(S.Context);
+ if (!I) {
+ S.Diag(E->getExprLoc(), diag::err_attribute_argument_n_type)
+ << &AL << Idx << AANT_ArgumentIntegerConstant << E->getSourceRange();
+ return {};
+ }
+ // Make sure we can fit it in 4 bits.
+ if (!I->isIntN(4)) {
+ S.Diag(E->getExprLoc(), diag::err_ice_too_large)
+ << toString(*I, 10, false) << 4 << /*Unsigned=*/1;
+ return {};
+ }
+ if (*I < 0) {
+ S.Diag(E->getExprLoc(), diag::warn_attribute_argument_n_negative)
+ << &AL << Idx << E->getSourceRange();
+ }
+
+ return {ConstantExpr::Create(S.getASTContext(), E, APValue(*I)),
+ I->getZExtValue()};
+}
+
+CUDAClusterDimsAttr *Sema::createClusterDimsAttr(const AttributeCommonInfo &CI,
+ Expr *X, Expr *Y, Expr *Z) {
+ CUDAClusterDimsAttr TmpAttr(Context, CI, X, Y, Z);
+
+ auto [NewX, ValX] = makeClusterDimsArgExpr(*this, X, TmpAttr, /*Idx=*/0);
+ auto [NewY, ValY] = makeClusterDimsArgExpr(*this, Y, TmpAttr, /*Idx=*/1);
+ auto [NewZ, ValZ] = makeClusterDimsArgExpr(*this, Z, TmpAttr, /*Idx=*/2);
+
+ if (!NewX || (Y && !NewY) || (Z && !NewZ))
+ return nullptr;
+
+ int FlatDim = ValX * ValY * ValZ;
+ const llvm::Triple TT =
+ (!Context.getLangOpts().CUDAIsDevice && Context.getAuxTargetInfo())
+ ? Context.getAuxTargetInfo()->getTriple()
+ : Context.getTargetInfo().getTriple();
+ int MaxDim = 1;
+ if (TT.isNVPTX())
+ MaxDim = 8;
+ else if (TT.isAMDGPU())
+ MaxDim = 16;
+ else
+ return nullptr;
+
+ // A maximum of 8 thread blocks in a cluster is supported as a portable
+ // cluster size in CUDA. The number is 16 for AMDGPU.
+ if (FlatDim > MaxDim) {
+ Diag(CI.getLoc(), diag::err_cluster_dims_too_large) << MaxDim << FlatDim;
+ return nullptr;
+ }
+
+ return CUDAClusterDimsAttr::Create(Context, NewX, NewY, NewZ, CI);
+}
+
+void Sema::addClusterDimsAttr(Decl *D, const AttributeCommonInfo &CI, Expr *X,
+ Expr *Y, Expr *Z) {
+ if (auto *Attr = createClusterDimsAttr(CI, X, Y, Z))
+ D->addAttr(Attr);
+}
+
+void Sema::addNoClusterAttr(Decl *D, const AttributeCommonInfo &CI) {
+ D->addAttr(CUDANoClusterAttr::Create(Context, CI));
+}
+
+static void handleClusterDimsAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ const TargetInfo &TTI = S.Context.getTargetInfo();
+ OffloadArch Arch = StringToOffloadArch(TTI.getTargetOpts().CPU);
+ if ((TTI.getTriple().isNVPTX() && Arch < clang::OffloadArch::SM_90) ||
+ (TTI.getTriple().isAMDGPU() &&
+ !TTI.hasFeatureEnabled(TTI.getTargetOpts().FeatureMap, "clusters"))) {
+ S.Diag(AL.getLoc(), diag::err_cluster_attr_not_supported) << AL;
+ return;
+ }
+
+ if (!AL.checkAtLeastNumArgs(S, /*Num=*/1) ||
+ !AL.checkAtMostNumArgs(S, /*Num=*/3))
+ return;
+
+ S.addClusterDimsAttr(D, AL, AL.getArgAsExpr(0),
+ AL.getNumArgs() > 1 ? AL.getArgAsExpr(1) : nullptr,
+ AL.getNumArgs() > 2 ? AL.getArgAsExpr(2) : nullptr);
+}
+
+static void handleNoClusterAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ const TargetInfo &TTI = S.Context.getTargetInfo();
+ OffloadArch Arch = StringToOffloadArch(TTI.getTargetOpts().CPU);
+ if ((TTI.getTriple().isNVPTX() && Arch < clang::OffloadArch::SM_90) ||
+ (TTI.getTriple().isAMDGPU() &&
+ !TTI.hasFeatureEnabled(TTI.getTargetOpts().FeatureMap, "clusters"))) {
+ S.Diag(AL.getLoc(), diag::err_cluster_attr_not_supported) << AL;
+ return;
+ }
+
+ S.addNoClusterAttr(D, AL);
+}
+
static void handleArgumentWithTypeTagAttr(Sema &S, Decl *D,
const ParsedAttr &AL) {
if (!AL.isArgIdent(0)) {
@@ -7141,6 +7249,12 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
case ParsedAttr::AT_CUDALaunchBounds:
handleLaunchBoundsAttr(S, D, AL);
break;
+ case ParsedAttr::AT_CUDAClusterDims:
+ handleClusterDimsAttr(S, D, AL);
+ break;
+ case ParsedAttr::AT_CUDANoCluster:
+ handleNoClusterAttr(S, D, AL);
+ break;
case ParsedAttr::AT_Restrict:
handleRestrictAttr(S, D, AL);
break;
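A hedged usage sketch for the new cluster attributes; the spellings used below are assumptions for illustration only, while the limits are the ones enforced above (a flat cluster size of at most 8 on NVPTX for sm_90 and newer, and 16 on AMDGPU targets with the clusters feature):

// Attribute spellings here are assumed, not taken from this patch.
[[clang::cluster_dims(2, 2, 1)]] __global__ void kernel_a(); // flat size 4: accepted
[[clang::cluster_dims(4, 4, 1)]] __global__ void kernel_b(); // flat size 16: too large for NVPTX
[[clang::no_cluster]] __global__ void kernel_c();            // opts out of clustering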
diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp
index 3e0e9bb..dca9d6e 100644
--- a/clang/lib/Sema/SemaExpr.cpp
+++ b/clang/lib/Sema/SemaExpr.cpp
@@ -15944,6 +15944,20 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
<< resultType << Input.get()->getSourceRange());
}
+ } else if (Context.getLangOpts().HLSL && resultType->isVectorType() &&
+ !resultType->hasBooleanRepresentation()) {
+    // HLSL unary logical 'not' behaves like C++, where the operand is
+    // converted to bool and the result is bool; HLSL extends this
+    // behavior to vectors.
+ const VectorType *VTy = resultType->castAs<VectorType>();
+ resultType =
+ Context.getExtVectorType(Context.BoolTy, VTy->getNumElements());
+
+ Input = ImpCastExprToType(
+ Input.get(), resultType,
+ ScalarTypeToBooleanCastKind(VTy->getElementType()))
+ .get();
+ break;
} else if (resultType->isExtVectorType()) {
if (Context.getLangOpts().OpenCL &&
Context.getLangOpts().getOpenCLCompatibleVersion() < 120) {
diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp
index f347066..5b3e89f 100644
--- a/clang/lib/Sema/SemaHLSL.cpp
+++ b/clang/lib/Sema/SemaHLSL.cpp
@@ -3006,6 +3006,24 @@ bool SemaHLSL::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
TheCall->setType(CounterHandleTy);
break;
}
+ case Builtin::BI__builtin_hlsl_resource_getdimensions_x: {
+ ASTContext &AST = SemaRef.getASTContext();
+ if (SemaRef.checkArgCount(TheCall, 2) ||
+ CheckResourceHandle(&SemaRef, TheCall, 0) ||
+ CheckArgTypeMatches(&SemaRef, TheCall->getArg(1), AST.UnsignedIntTy) ||
+ CheckModifiableLValue(&SemaRef, TheCall, 1))
+ return true;
+ break;
+ }
+ case Builtin::BI__builtin_hlsl_resource_getstride: {
+ ASTContext &AST = SemaRef.getASTContext();
+ if (SemaRef.checkArgCount(TheCall, 2) ||
+ CheckResourceHandle(&SemaRef, TheCall, 0) ||
+ CheckArgTypeMatches(&SemaRef, TheCall->getArg(1), AST.UnsignedIntTy) ||
+ CheckModifiableLValue(&SemaRef, TheCall, 1))
+ return true;
+ break;
+ }
case Builtin::BI__builtin_hlsl_and:
case Builtin::BI__builtin_hlsl_or: {
if (SemaRef.checkArgCount(TheCall, 2))
diff --git a/clang/lib/Sema/SemaRISCV.cpp b/clang/lib/Sema/SemaRISCV.cpp
index 3ba93ff9..c5ef0d5 100644
--- a/clang/lib/Sema/SemaRISCV.cpp
+++ b/clang/lib/Sema/SemaRISCV.cpp
@@ -1464,7 +1464,8 @@ void SemaRISCV::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D,
}
else if (Info.ElementType->isBFloat16Type() &&
!FeatureMap.lookup("zvfbfmin") &&
- !FeatureMap.lookup("xandesvbfhcvt"))
+ !FeatureMap.lookup("xandesvbfhcvt") &&
+ !FeatureMap.lookup("experimental-zvfbfa"))
if (DeclareAndesVectorBuiltins) {
Diag(Loc, diag::err_riscv_type_requires_extension, D)
<< Ty << "zvfbfmin or xandesvbfhcvt";
diff --git a/clang/lib/Sema/SemaTemplateInstantiate.cpp b/clang/lib/Sema/SemaTemplateInstantiate.cpp
index bec2820..ca7e3b2 100644
--- a/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -35,6 +35,7 @@
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/TemplateInstCallback.h"
+#include "llvm/ADT/SmallVectorExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SaveAndRestore.h"
@@ -4487,6 +4488,119 @@ ExprResult Sema::SubstConstraintExprWithoutSatisfaction(
return Instantiator.TransformExpr(E);
}
+ExprResult Sema::SubstConceptTemplateArguments(
+ const ConceptSpecializationExpr *CSE, const Expr *ConstraintExpr,
+ const MultiLevelTemplateArgumentList &MLTAL) {
+ TemplateInstantiator Instantiator(*this, MLTAL, SourceLocation(),
+ DeclarationName());
+ const ASTTemplateArgumentListInfo *ArgsAsWritten =
+ CSE->getTemplateArgsAsWritten();
+ TemplateArgumentListInfo SubstArgs(ArgsAsWritten->getLAngleLoc(),
+ ArgsAsWritten->getRAngleLoc());
+
+ Sema::InstantiatingTemplate Inst(
+ *this, ArgsAsWritten->arguments().front().getSourceRange().getBegin(),
+ Sema::InstantiatingTemplate::ConstraintNormalization{},
+ CSE->getNamedConcept(),
+ ArgsAsWritten->arguments().front().getSourceRange());
+
+ if (Inst.isInvalid())
+ return ExprError();
+
+ if (Instantiator.TransformConceptTemplateArguments(
+ ArgsAsWritten->getTemplateArgs(),
+ ArgsAsWritten->getTemplateArgs() +
+ ArgsAsWritten->getNumTemplateArgs(),
+ SubstArgs))
+ return true;
+
+ llvm::SmallVector<TemplateArgument, 4> NewArgList = llvm::map_to_vector(
+ SubstArgs.arguments(),
+ [](const TemplateArgumentLoc &Loc) { return Loc.getArgument(); });
+
+ MultiLevelTemplateArgumentList MLTALForConstraint =
+ getTemplateInstantiationArgs(
+ CSE->getNamedConcept(),
+ CSE->getNamedConcept()->getLexicalDeclContext(),
+ /*Final=*/false,
+ /*Innermost=*/NewArgList,
+ /*RelativeToPrimary=*/true,
+ /*Pattern=*/nullptr,
+ /*ForConstraintInstantiation=*/true);
+
+  // Rebuild a constraint, substituting only non-dependent concept names
+  // and nothing else.
+  // Given C<SomeType, SomeValue, SomeConceptName, SomeDependentConceptName>,
+  // only SomeConceptName is substituted into the constraint expression of C.
+ struct ConstraintExprTransformer : TreeTransform<ConstraintExprTransformer> {
+ using Base = TreeTransform<ConstraintExprTransformer>;
+ MultiLevelTemplateArgumentList &MLTAL;
+
+ ConstraintExprTransformer(Sema &SemaRef,
+ MultiLevelTemplateArgumentList &MLTAL)
+ : TreeTransform(SemaRef), MLTAL(MLTAL) {}
+
+ ExprResult TransformExpr(Expr *E) {
+ if (!E)
+ return E;
+ switch (E->getStmtClass()) {
+ case Stmt::BinaryOperatorClass:
+ case Stmt::ConceptSpecializationExprClass:
+ case Stmt::ParenExprClass:
+ case Stmt::UnresolvedLookupExprClass:
+ return Base::TransformExpr(E);
+ default:
+ break;
+ }
+ return E;
+ }
+
+    // Rebuild both branches of a conjunction / disjunction,
+    // even if there is a substitution failure in one of
+    // the branches.
+ ExprResult TransformBinaryOperator(BinaryOperator *E) {
+ if (!(E->getOpcode() == BinaryOperatorKind::BO_LAnd ||
+ E->getOpcode() == BinaryOperatorKind::BO_LOr))
+ return E;
+
+ ExprResult LHS = TransformExpr(E->getLHS());
+ ExprResult RHS = TransformExpr(E->getRHS());
+
+ if (LHS.get() == E->getLHS() && RHS.get() == E->getRHS())
+ return E;
+
+ return BinaryOperator::Create(SemaRef.Context, LHS.get(), RHS.get(),
+ E->getOpcode(), SemaRef.Context.BoolTy,
+ VK_PRValue, OK_Ordinary,
+ E->getOperatorLoc(), FPOptionsOverride{});
+ }
+
+ bool TransformTemplateArgument(const TemplateArgumentLoc &Input,
+ TemplateArgumentLoc &Output,
+ bool Uneval = false) {
+ if (Input.getArgument().isConceptOrConceptTemplateParameter())
+ return Base::TransformTemplateArgument(Input, Output, Uneval);
+
+ Output = Input;
+ return false;
+ }
+
+ ExprResult TransformUnresolvedLookupExpr(UnresolvedLookupExpr *E,
+ bool IsAddressOfOperand = false) {
+ if (E->isConceptReference()) {
+ ExprResult Res = SemaRef.SubstExpr(E, MLTAL);
+ return Res;
+ }
+ return E;
+ }
+ };
+
+ ConstraintExprTransformer Transformer(*this, MLTALForConstraint);
+ ExprResult Res =
+ Transformer.TransformExpr(const_cast<Expr *>(ConstraintExpr));
+ return Res;
+}
+
ExprResult Sema::SubstInitializer(Expr *Init,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit) {
diff --git a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index 468bc1d..4863b45 100644
--- a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -707,6 +707,23 @@ static void instantiateDependentAMDGPUMaxNumWorkGroupsAttr(
S.AMDGPU().addAMDGPUMaxNumWorkGroupsAttr(New, Attr, XExpr, YExpr, ZExpr);
}
+static void instantiateDependentCUDAClusterDimsAttr(
+ Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs,
+ const CUDAClusterDimsAttr &Attr, Decl *New) {
+ EnterExpressionEvaluationContext Unevaluated(
+ S, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+
+ auto SubstElt = [&S, &TemplateArgs](Expr *E) {
+ return E ? S.SubstExpr(E, TemplateArgs).get() : nullptr;
+ };
+
+ Expr *XExpr = SubstElt(Attr.getX());
+ Expr *YExpr = SubstElt(Attr.getY());
+ Expr *ZExpr = SubstElt(Attr.getZ());
+
+ S.addClusterDimsAttr(New, Attr, XExpr, YExpr, ZExpr);
+}
+
// This doesn't take any template parameters, but we have a custom action that
// needs to happen when the kernel itself is instantiated. We need to run the
// ItaniumMangler to mark the names required to name this kernel.
@@ -765,10 +782,18 @@ static bool isRelevantAttr(Sema &S, const Decl *D, const Attr *A) {
static void instantiateDependentHLSLParamModifierAttr(
Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs,
- const HLSLParamModifierAttr *Attr, Decl *New) {
- ParmVarDecl *P = cast<ParmVarDecl>(New);
- P->addAttr(Attr->clone(S.getASTContext()));
- P->setType(S.HLSL().getInoutParameterType(P->getType()));
+ const HLSLParamModifierAttr *Attr, const Decl *Old, Decl *New) {
+ ParmVarDecl *NewParm = cast<ParmVarDecl>(New);
+ NewParm->addAttr(Attr->clone(S.getASTContext()));
+
+ const Type *OldParmTy = cast<ParmVarDecl>(Old)->getType().getTypePtr();
+ if (OldParmTy->isDependentType() && Attr->isAnyOut())
+ NewParm->setType(S.HLSL().getInoutParameterType(NewParm->getType()));
+
+ assert(
+ (!Attr->isAnyOut() || (NewParm->getType().isRestrictQualified() &&
+ NewParm->getType()->isReferenceType())) &&
+ "out or inout parameter type must be a reference and restrict qualified");
}
void Sema::InstantiateAttrsForDecl(
@@ -921,9 +946,14 @@ void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
*this, TemplateArgs, *AMDGPUMaxNumWorkGroups, New);
}
+ if (const auto *CUDAClusterDims = dyn_cast<CUDAClusterDimsAttr>(TmplAttr)) {
+ instantiateDependentCUDAClusterDimsAttr(*this, TemplateArgs,
+ *CUDAClusterDims, New);
+ }
+
if (const auto *ParamAttr = dyn_cast<HLSLParamModifierAttr>(TmplAttr)) {
instantiateDependentHLSLParamModifierAttr(*this, TemplateArgs, ParamAttr,
- New);
+ Tmpl, New);
continue;
}
diff --git a/clang/lib/Sema/TreeTransform.h b/clang/lib/Sema/TreeTransform.h
index 86896ab..29f0c30 100644
--- a/clang/lib/Sema/TreeTransform.h
+++ b/clang/lib/Sema/TreeTransform.h
@@ -694,6 +694,12 @@ public:
TemplateArgumentListInfo &Outputs,
bool Uneval = false);
+ template <typename InputIterator>
+ bool TransformConceptTemplateArguments(InputIterator First,
+ InputIterator Last,
+ TemplateArgumentListInfo &Outputs,
+ bool Uneval = false);
+
/// Checks if the argument pack from \p In will need to be expanded and does
/// the necessary prework.
/// Whether the expansion is needed is captured in Info.Expand.
@@ -5192,6 +5198,49 @@ bool TreeTransform<Derived>::TransformTemplateArguments(
return false;
}
+template <typename Derived>
+template <typename InputIterator>
+bool TreeTransform<Derived>::TransformConceptTemplateArguments(
+ InputIterator First, InputIterator Last, TemplateArgumentListInfo &Outputs,
+ bool Uneval) {
+
+ // [C++26][temp.constr.normal]
+ // any non-dependent concept template argument
+ // is substituted into the constraint-expression of C.
+ auto isNonDependentConceptArgument = [](const TemplateArgument &Arg) {
+ return !Arg.isDependent() && Arg.isConceptOrConceptTemplateParameter();
+ };
+
+ for (; First != Last; ++First) {
+ TemplateArgumentLoc Out;
+ TemplateArgumentLoc In = *First;
+
+ if (In.getArgument().getKind() == TemplateArgument::Pack) {
+ typedef TemplateArgumentLocInventIterator<Derived,
+ TemplateArgument::pack_iterator>
+ PackLocIterator;
+ if (TransformConceptTemplateArguments(
+ PackLocIterator(*this, In.getArgument().pack_begin()),
+ PackLocIterator(*this, In.getArgument().pack_end()), Outputs,
+ Uneval))
+ return true;
+ continue;
+ }
+
+ if (!isNonDependentConceptArgument(In.getArgument())) {
+ Outputs.addArgument(In);
+ continue;
+ }
+
+ if (getDerived().TransformTemplateArgument(In, Out, Uneval))
+ return true;
+
+ Outputs.addArgument(Out);
+ }
+
+ return false;
+}
+
// FIXME: Find ways to reduce code duplication for pack expansions.
template <typename Derived>
bool TreeTransform<Derived>::PreparePackForExpansion(TemplateArgumentLoc In,
diff --git a/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
index bf35bee..3ddd659 100644
--- a/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
@@ -104,7 +104,7 @@ class RAIIMutexDescriptor {
// this function is called instead of early returning it. To avoid this, a
// bool variable (IdentifierInfoInitialized) is used and the function will
// be run only once.
- const auto &ASTCtx = Call.getState()->getStateManager().getContext();
+ const auto &ASTCtx = Call.getASTContext();
Guard = &ASTCtx.Idents.get(GuardName);
}
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp b/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
index 9d3aeff..2420848 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
@@ -929,7 +929,7 @@ ObjCDeallocChecker::getValueReleasedByNillingOut(const ObjCMethodCall &M,
SVal Arg = M.getArgSVal(0);
ProgramStateRef notNilState, nilState;
std::tie(notNilState, nilState) =
- M.getState()->assume(Arg.castAs<DefinedOrUnknownSVal>());
+ C.getState()->assume(Arg.castAs<DefinedOrUnknownSVal>());
if (!(nilState && !notNilState))
return nullptr;
diff --git a/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
index f984caf..227cbfa 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
@@ -34,7 +34,7 @@ class ObjCSuperDeallocChecker
this, "[super dealloc] should not be called more than once",
categories::CoreFoundationObjectiveC};
- void initIdentifierInfoAndSelectors(ASTContext &Ctx) const;
+ void initIdentifierInfoAndSelectors(const ASTContext &Ctx) const;
bool isSuperDeallocMessage(const ObjCMethodCall &M) const;
@@ -214,8 +214,8 @@ void ObjCSuperDeallocChecker::diagnoseCallArguments(const CallEvent &CE,
}
}
-void
-ObjCSuperDeallocChecker::initIdentifierInfoAndSelectors(ASTContext &Ctx) const {
+void ObjCSuperDeallocChecker::initIdentifierInfoAndSelectors(
+ const ASTContext &Ctx) const {
if (IIdealloc)
return;
@@ -230,7 +230,7 @@ ObjCSuperDeallocChecker::isSuperDeallocMessage(const ObjCMethodCall &M) const {
if (M.getOriginExpr()->getReceiverKind() != ObjCMessageExpr::SuperInstance)
return false;
- ASTContext &Ctx = M.getState()->getStateManager().getContext();
+ const ASTContext &Ctx = M.getASTContext();
initIdentifierInfoAndSelectors(Ctx);
return M.getSelector() == SELdealloc;
diff --git a/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp
index 4fc1c57..db8bbee 100644
--- a/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp
@@ -211,13 +211,13 @@ private:
if (!DefaultType)
return;
- ProgramStateRef State = ConstructorCall->getState();
+ ProgramStateRef State = C.getState();
State = State->set<VariantHeldTypeMap>(ThisMemRegion, *DefaultType);
C.addTransition(State);
}
bool handleStdGetCall(const CallEvent &Call, CheckerContext &C) const {
- ProgramStateRef State = Call.getState();
+ ProgramStateRef State = C.getState();
const auto &ArgType = Call.getArgSVal(0)
.getType(C.getASTContext())
diff --git a/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h b/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h
index dec4612..b8fb572 100644
--- a/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h
+++ b/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h
@@ -52,7 +52,7 @@ removeInformationStoredForDeadInstances(const CallEvent &Call,
template <class TypeMap>
void handleConstructorAndAssignment(const CallEvent &Call, CheckerContext &C,
SVal ThisSVal) {
- ProgramStateRef State = Call.getState();
+ ProgramStateRef State = C.getState();
if (!State)
return;
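These checker changes all follow one rule: inside a checker callback, read the program state from the CheckerContext rather than from the CallEvent, whose attached state may be stale. A minimal sketch of the pattern, with an illustrative checker name and the usual registration boilerplate omitted:

class DemoChecker : public Checker<eval::Call> {
public:
  bool evalCall(const CallEvent &Call, CheckerContext &C) const {
    ProgramStateRef State = C.getState();  // up-to-date state for this node
    if (!Call.getOriginExpr())
      return false;
    // ... derive a new state from State and the call's arguments ...
    C.addTransition(State);
    return true;
  }
};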
diff --git a/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp b/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
index 02f34bc..c905ee6 100644
--- a/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
+++ b/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
@@ -173,7 +173,7 @@ const PointerToMemberData *BasicValueFactory::getPointerToMemberData(
return D;
}
-LLVM_ATTRIBUTE_UNUSED static bool hasNoRepeatedElements(
+[[maybe_unused]] static bool hasNoRepeatedElements(
llvm::ImmutableList<const CXXBaseSpecifier *> BaseSpecList) {
llvm::SmallPtrSet<QualType, 16> BaseSpecSeen;
for (const CXXBaseSpecifier *BaseSpec : BaseSpecList) {
diff --git a/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp b/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
index 44c6f9f..8ee4832 100644
--- a/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
+++ b/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
@@ -731,19 +731,22 @@ void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
ExplodedNodeSet checkDst;
NodeBuilder B(Pred, checkDst, Eng.getBuilderContext());
+ ProgramStateRef State = Pred->getState();
+ CallEventRef<> UpdatedCall = Call.cloneWithState(State);
+
// Check if any of the EvalCall callbacks can evaluate the call.
for (const auto &EvalCallChecker : EvalCallCheckers) {
// TODO: Support the situation when the call doesn't correspond
// to any Expr.
ProgramPoint L = ProgramPoint::getProgramPoint(
- Call.getOriginExpr(), ProgramPoint::PostStmtKind,
+ UpdatedCall->getOriginExpr(), ProgramPoint::PostStmtKind,
Pred->getLocationContext(), EvalCallChecker.Checker);
bool evaluated = false;
- { // CheckerContext generates transitions(populates checkDest) on
+    { // CheckerContext generates transitions (populates checkDst) on
// destruction, so introduce the scope to make sure it gets properly
// populated.
CheckerContext C(B, Eng, Pred, L);
- evaluated = EvalCallChecker(Call, C);
+ evaluated = EvalCallChecker(*UpdatedCall, C);
}
#ifndef NDEBUG
if (evaluated && evaluatorChecker) {
@@ -774,7 +777,7 @@ void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
// If none of the checkers evaluated the call, ask ExprEngine to handle it.
if (!evaluatorChecker) {
NodeBuilder B(Pred, Dst, Eng.getBuilderContext());
- Eng.defaultEvalCall(B, Pred, Call, CallOpts);
+ Eng.defaultEvalCall(B, Pred, *UpdatedCall, CallOpts);
}
}
}
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
index 0c491b8..ac6c1d7 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -628,6 +628,8 @@ void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
ProgramStateRef ExprEngine::finishArgumentConstruction(ProgramStateRef State,
const CallEvent &Call) {
+  // WARNING: The state attached to 'Call' may be obsolete; do not call any
+  // methods that rely on it!
const Expr *E = Call.getOriginExpr();
// FIXME: Constructors to placement arguments of operator new
// are not supported yet.
@@ -653,6 +655,8 @@ ProgramStateRef ExprEngine::finishArgumentConstruction(ProgramStateRef State,
void ExprEngine::finishArgumentConstruction(ExplodedNodeSet &Dst,
ExplodedNode *Pred,
const CallEvent &Call) {
+  // WARNING: The state attached to 'Call' may be obsolete; do not call any
+  // methods that rely on it!
ProgramStateRef State = Pred->getState();
ProgramStateRef CleanedState = finishArgumentConstruction(State, Call);
if (CleanedState == State) {
@@ -670,35 +674,33 @@ void ExprEngine::finishArgumentConstruction(ExplodedNodeSet &Dst,
}
void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
- const CallEvent &Call) {
- // WARNING: At this time, the state attached to 'Call' may be older than the
- // state in 'Pred'. This is a minor optimization since CheckerManager will
- // use an updated CallEvent instance when calling checkers, but if 'Call' is
- // ever used directly in this function all callers should be updated to pass
- // the most recent state. (It is probably not worth doing the work here since
- // for some callers this will not be necessary.)
+ const CallEvent &CallTemplate) {
+ // NOTE: CallTemplate is called a "template" because its attached state may
+ // be obsolete (compared to the state of Pred). The state-dependent methods
+ // of CallEvent should be used only after a `cloneWithState` call that
+ // attaches the up-to-date state to this template object.
// Run any pre-call checks using the generic call interface.
ExplodedNodeSet dstPreVisit;
- getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred,
- Call, *this);
+ getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred, CallTemplate,
+ *this);
// Actually evaluate the function call. We try each of the checkers
// to see if the can evaluate the function call, and get a callback at
// defaultEvalCall if all of them fail.
ExplodedNodeSet dstCallEvaluated;
- getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
- Call, *this, EvalCallOptions());
+ getCheckerManager().runCheckersForEvalCall(
+ dstCallEvaluated, dstPreVisit, CallTemplate, *this, EvalCallOptions());
// If there were other constructors called for object-type arguments
// of this call, clean them up.
ExplodedNodeSet dstArgumentCleanup;
for (ExplodedNode *I : dstCallEvaluated)
- finishArgumentConstruction(dstArgumentCleanup, I, Call);
+ finishArgumentConstruction(dstArgumentCleanup, I, CallTemplate);
ExplodedNodeSet dstPostCall;
getCheckerManager().runCheckersForPostCall(dstPostCall, dstArgumentCleanup,
- Call, *this);
+ CallTemplate, *this);
// Escaping symbols conjured during invalidating the regions above.
// Note that, for inlined calls the nodes were put back into the worklist,
@@ -708,12 +710,13 @@ void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
// Run pointerEscape callback with the newly conjured symbols.
SmallVector<std::pair<SVal, SVal>, 8> Escaped;
for (ExplodedNode *I : dstPostCall) {
- NodeBuilder B(I, Dst, *currBldrCtx);
ProgramStateRef State = I->getState();
+ CallEventRef<> Call = CallTemplate.cloneWithState(State);
+ NodeBuilder B(I, Dst, *currBldrCtx);
Escaped.clear();
{
unsigned Arg = -1;
- for (const ParmVarDecl *PVD : Call.parameters()) {
+ for (const ParmVarDecl *PVD : Call->parameters()) {
++Arg;
QualType ParamTy = PVD->getType();
if (ParamTy.isNull() ||
@@ -722,13 +725,13 @@ void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
QualType Pointee = ParamTy->getPointeeType();
if (Pointee.isConstQualified() || Pointee->isVoidType())
continue;
- if (const MemRegion *MR = Call.getArgSVal(Arg).getAsRegion())
+ if (const MemRegion *MR = Call->getArgSVal(Arg).getAsRegion())
Escaped.emplace_back(loc::MemRegionVal(MR), State->getSVal(MR, Pointee));
}
}
State = processPointerEscapedOnBind(State, Escaped, I->getLocationContext(),
- PSK_EscapeOutParameters, &Call);
+ PSK_EscapeOutParameters, &*Call);
if (State == I->getState())
Dst.insert(I);
@@ -1212,48 +1215,47 @@ static bool isTrivialObjectAssignment(const CallEvent &Call) {
}
void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
- const CallEvent &CallTemplate,
+ const CallEvent &Call,
const EvalCallOptions &CallOpts) {
// Make sure we have the most recent state attached to the call.
ProgramStateRef State = Pred->getState();
- CallEventRef<> Call = CallTemplate.cloneWithState(State);
// Special-case trivial assignment operators.
- if (isTrivialObjectAssignment(*Call)) {
- performTrivialCopy(Bldr, Pred, *Call);
+ if (isTrivialObjectAssignment(Call)) {
+ performTrivialCopy(Bldr, Pred, Call);
return;
}
// Try to inline the call.
// The origin expression here is just used as a kind of checksum;
// this should still be safe even for CallEvents that don't come from exprs.
- const Expr *E = Call->getOriginExpr();
+ const Expr *E = Call.getOriginExpr();
ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);
if (InlinedFailedState) {
// If we already tried once and failed, make sure we don't retry later.
State = InlinedFailedState;
} else {
- RuntimeDefinition RD = Call->getRuntimeDefinition();
- Call->setForeign(RD.isForeign());
+ RuntimeDefinition RD = Call.getRuntimeDefinition();
+ Call.setForeign(RD.isForeign());
const Decl *D = RD.getDecl();
- if (shouldInlineCall(*Call, D, Pred, CallOpts)) {
+ if (shouldInlineCall(Call, D, Pred, CallOpts)) {
if (RD.mayHaveOtherDefinitions()) {
AnalyzerOptions &Options = getAnalysisManager().options;
// Explore with and without inlining the call.
if (Options.getIPAMode() == IPAK_DynamicDispatchBifurcate) {
- BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
+ BifurcateCall(RD.getDispatchRegion(), Call, D, Bldr, Pred);
return;
}
// Don't inline if we're not in any dynamic dispatch mode.
if (Options.getIPAMode() != IPAK_DynamicDispatch) {
- conservativeEvalCall(*Call, Bldr, Pred, State);
+ conservativeEvalCall(Call, Bldr, Pred, State);
return;
}
}
- ctuBifurcate(*Call, D, Bldr, Pred, State);
+ ctuBifurcate(Call, D, Bldr, Pred, State);
return;
}
}
@@ -1261,10 +1263,10 @@ void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
// If we can't inline it, clean up the state traits used only if the function
// is inlined.
State = removeStateTraitsUsedForArrayEvaluation(
- State, dyn_cast_or_null<CXXConstructExpr>(E), Call->getLocationContext());
+ State, dyn_cast_or_null<CXXConstructExpr>(E), Call.getLocationContext());
// Also handle the return value and invalidate the regions.
- conservativeEvalCall(*Call, Bldr, Pred, State);
+ conservativeEvalCall(Call, Bldr, Pred, State);
}
void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
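The renames above document a convention rather than change behavior: a CallEvent handed around as a "template" may carry stale state, and its state-dependent accessors are only safe once the current state has been rebound. A short sketch of that idiom (local names are illustrative):

ProgramStateRef State = Pred->getState();
CallEventRef<> Call = CallTemplate.cloneWithState(State); // attach the fresh state
SVal FirstArg = Call->getArgSVal(0);                      // now safe to query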
diff --git a/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
index ab45e67..245a730 100644
--- a/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
+++ b/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -983,7 +983,7 @@ public:
}
/// Check equivalence data for consistency.
- [[nodiscard]] LLVM_ATTRIBUTE_UNUSED static bool
+ [[nodiscard]] [[maybe_unused]] static bool
isClassDataConsistent(ProgramStateRef State);
[[nodiscard]] QualType getType() const {
@@ -1041,8 +1041,7 @@ private:
// Constraint functions
//===----------------------------------------------------------------------===//
-[[nodiscard]] LLVM_ATTRIBUTE_UNUSED bool
-areFeasible(ConstraintRangeTy Constraints) {
+[[nodiscard]] [[maybe_unused]] bool areFeasible(ConstraintRangeTy Constraints) {
return llvm::none_of(
Constraints,
[](const std::pair<EquivalenceClass, RangeSet> &ClassConstraint) {
@@ -1134,7 +1133,7 @@ template <class EndTy>
return End;
}
-[[nodiscard]] LLVM_ATTRIBUTE_UNUSED inline std::optional<RangeSet>
+[[nodiscard]] [[maybe_unused]] inline std::optional<RangeSet>
intersect(RangeSet::Factory &F, const RangeSet *End) {
// This is an extraneous conversion from a raw pointer into
// std::optional<RangeSet>
diff --git a/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index 4efde59..f6a3e79 100644
--- a/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -62,7 +62,9 @@ ALWAYS_ENABLED_STATISTIC(
"The # of visited basic blocks in the analyzed functions.");
ALWAYS_ENABLED_STATISTIC(PercentReachableBlocks,
"The % of reachable basic blocks.");
-STAT_MAX(MaxCFGSize, "The maximum number of basic blocks in a function.");
+ALWAYS_ENABLED_STATISTIC(MaxCFGSize,
+ "The maximum number of basic blocks in a function.");
+static UnsignedEPStat CFGSize("CFGSize");
//===----------------------------------------------------------------------===//
// AnalysisConsumer declaration.
//===----------------------------------------------------------------------===//
@@ -783,15 +785,19 @@ void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode,
void AnalysisConsumer::RunPathSensitiveChecks(Decl *D,
ExprEngine::InliningModes IMode,
SetOfConstDecls *VisitedCallees) {
+ auto *CFG = Mgr->getCFG(D);
+
// Construct the analysis engine. First check if the CFG is valid.
// FIXME: Inter-procedural analysis will need to handle invalid CFGs.
- if (!Mgr->getCFG(D))
+ if (!CFG)
return;
// See if the LiveVariables analysis scales.
if (!Mgr->getAnalysisDeclContext(D)->getAnalysis<RelaxedLiveVariables>())
return;
+ CFGSize.set(CFG->size());
+
ExprEngine Eng(CTU, *Mgr, VisitedCallees, &FunctionSummaries, IMode);
// Execute the worklist algorithm.
diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
index 5a4e805..dad3d0da 100644
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -654,6 +654,9 @@ PrototypeDescriptor::parsePrototypeDescriptor(
case 'F':
TM |= TypeModifier::Float;
break;
+ case 'Y':
+ TM |= TypeModifier::BFloat;
+ break;
case 'S':
TM |= TypeModifier::LMUL1;
break;
@@ -704,6 +707,8 @@ void RVVType::applyModifier(const PrototypeDescriptor &Transformer) {
ElementBitwidth *= 2;
LMUL.MulLog2LMUL(1);
Scale = LMUL.getScale(ElementBitwidth);
+ if (ScalarType == ScalarTypeKind::BFloat)
+ ScalarType = ScalarTypeKind::Float;
break;
case VectorTypeModifier::Widening4XVector:
ElementBitwidth *= 4;
diff --git a/clang/lib/Tooling/CompilationDatabase.cpp b/clang/lib/Tooling/CompilationDatabase.cpp
index 860457a..4070bb8 100644
--- a/clang/lib/Tooling/CompilationDatabase.cpp
+++ b/clang/lib/Tooling/CompilationDatabase.cpp
@@ -403,7 +403,7 @@ namespace tooling {
// This anchor is used to force the linker to link in the generated object file
// and thus register the JSONCompilationDatabasePlugin.
extern volatile int JSONAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED JSONAnchorDest = JSONAnchorSource;
+[[maybe_unused]] static int JSONAnchorDest = JSONAnchorSource;
} // namespace tooling
} // namespace clang
diff --git a/clang/lib/Tooling/Execution.cpp b/clang/lib/Tooling/Execution.cpp
index 247b260..d0499fa 100644
--- a/clang/lib/Tooling/Execution.cpp
+++ b/clang/lib/Tooling/Execution.cpp
@@ -96,9 +96,9 @@ createExecutorFromCommandLineArgs(int &argc, const char **argv,
// and thus register the StandaloneToolExecutorPlugin etc.
extern volatile int StandaloneToolExecutorAnchorSource;
extern volatile int AllTUsToolExecutorAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED StandaloneToolExecutorAnchorDest =
+[[maybe_unused]] static int StandaloneToolExecutorAnchorDest =
StandaloneToolExecutorAnchorSource;
-static int LLVM_ATTRIBUTE_UNUSED AllTUsToolExecutorAnchorDest =
+[[maybe_unused]] static int AllTUsToolExecutorAnchorDest =
AllTUsToolExecutorAnchorSource;
} // end namespace tooling
diff --git a/clang/lib/Tooling/Syntax/BuildTree.cpp b/clang/lib/Tooling/Syntax/BuildTree.cpp
index 90fd1f9..9d49d72 100644
--- a/clang/lib/Tooling/Syntax/BuildTree.cpp
+++ b/clang/lib/Tooling/Syntax/BuildTree.cpp
@@ -77,8 +77,10 @@ static Expr *IgnoreImplicit(Expr *E) {
IgnoreCXXFunctionalCastExprWrappingConstructor);
}
-LLVM_ATTRIBUTE_UNUSED
-static bool isImplicitExpr(Expr *E) { return IgnoreImplicit(E) != E; }
+[[maybe_unused]]
+static bool isImplicitExpr(Expr *E) {
+ return IgnoreImplicit(E) != E;
+}
namespace {
/// Get start location of the Declarator from the TypeLoc.
diff --git a/clang/test/AST/ByteCode/typeid.cpp b/clang/test/AST/ByteCode/typeid.cpp
index 090309d1..aca18d4 100644
--- a/clang/test/AST/ByteCode/typeid.cpp
+++ b/clang/test/AST/ByteCode/typeid.cpp
@@ -63,9 +63,12 @@ namespace TypeidPtrInEvaluationResult {
// Regression test for crash in ArrayElemPtrPop with typeid pointers. GH-163127
namespace TypeidPtrRegression {
void dontcrash() {
- // this should just be an error and not an ICE
constexpr auto res = ((void**)&typeid(int))[0]; // both-error {{must be initialized by a constant expression}} \
- // both-note {{cast that performs the conversions of a reinterpret_cast is not allowed in a constant expression}}
+ // both-note {{cast that performs the conversions of a reinterpret_cast is not allowed in a constant expression}}
+ }
+ void dontcrash2() {
+ constexpr auto res = ((void**)&typeid(int))[1]; // both-error {{must be initialized by a constant expression}} \
+ // both-note {{cast that performs the conversions of a reinterpret_cast is not allowed in a constant expression}}
}
}
diff --git a/clang/test/AST/HLSL/ByteAddressBuffers-AST.hlsl b/clang/test/AST/HLSL/ByteAddressBuffers-AST.hlsl
index 43d8dde..61d5e5a 100644
--- a/clang/test/AST/HLSL/ByteAddressBuffers-AST.hlsl
+++ b/clang/test/AST/HLSL/ByteAddressBuffers-AST.hlsl
@@ -142,5 +142,19 @@ RESOURCE Buffer;
// CHECK-NEXT: DeclRefExpr {{.*}} 'hlsl::[[RESOURCE]]' lvalue Var {{.*}} 'tmp' 'hlsl::[[RESOURCE]]'
// CHECK-NEXT: AlwaysInlineAttr {{.*}} Implicit always_inline
+// GetDimensions method
+
+// CHECK-NEXT: CXXMethodDecl {{.*}} GetDimensions 'void (out unsigned int)'
+// CHECK-NEXT: ParmVarDecl {{.*}} dim 'unsigned int &__restrict'
+// CHECK-NEXT: HLSLParamModifierAttr {{.*}} out
+// CHECK-NEXT: CompoundStmt
+// CHECK-NEXT: CallExpr {{.*}} 'void'
+// CHECK-NEXT: ImplicitCastExpr {{.*}} 'void (*)(...) noexcept' <BuiltinFnToFnPtr>
+// CHECK-NEXT: DeclRefExpr {{.*}} '<builtin fn type>' Function {{.*}} '__builtin_hlsl_resource_getdimensions_x' 'void (...) noexcept'
+// CHECK-NEXT: MemberExpr {{.*}} '__hlsl_resource_t {{.*}}' lvalue .__handle {{.*}}
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]' lvalue implicit this
+// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'dim' 'unsigned int &__restrict'
+// CHECK-NEXT: AlwaysInlineAttr {{.*}} Implicit always_inline
+
// CHECK-NOSUBSCRIPT-NOT: CXXMethodDecl {{.*}} operator[] 'const char8_t &(unsigned int) const'
// CHECK-NOSUBSCRIPT-NOT: CXXMethodDecl {{.*}} operator[] 'char8_t &(unsigned int)'
diff --git a/clang/test/AST/HLSL/StructuredBuffers-AST.hlsl b/clang/test/AST/HLSL/StructuredBuffers-AST.hlsl
index e72207e..7a8c57c 100644
--- a/clang/test/AST/HLSL/StructuredBuffers-AST.hlsl
+++ b/clang/test/AST/HLSL/StructuredBuffers-AST.hlsl
@@ -408,6 +408,28 @@ RESOURCE<float> Buffer;
// CHECK-CONSUME-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-CONSUME-NEXT: IntegerLiteral {{.*}} 'int' -1
+// GetDimensions method
+
+// CHECK: CXXMethodDecl {{.*}} GetDimensions 'void (out unsigned int, out unsigned int)'
+// CHECK-NEXT: ParmVarDecl {{.*}} numStructs 'unsigned int &__restrict'
+// CHECK-NEXT: HLSLParamModifierAttr {{.*}} out
+// CHECK-NEXT: ParmVarDecl {{.*}} stride 'unsigned int &__restrict'
+// CHECK-NEXT: HLSLParamModifierAttr {{.*}} out
+// CHECK-NEXT: CompoundStmt
+// CHECK-NEXT: CallExpr {{.*}} 'void'
+// CHECK-NEXT: ImplicitCastExpr {{.*}} 'void (*)(...) noexcept' <BuiltinFnToFnPtr>
+// CHECK-NEXT: DeclRefExpr {{.*}} '<builtin fn type>' Function {{.*}} '__builtin_hlsl_resource_getdimensions_x' 'void (...) noexcept'
+// CHECK-NEXT: MemberExpr {{.*}} '__hlsl_resource_t {{.*}}' lvalue .__handle {{.*}}
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'numStructs' 'unsigned int &__restrict'
+// CHECK-NEXT: CallExpr {{.*}} 'void'
+// CHECK-NEXT: ImplicitCastExpr {{.*}} 'void (*)(...) noexcept' <BuiltinFnToFnPtr>
+// CHECK-NEXT: DeclRefExpr {{.*}} '<builtin fn type>' Function {{.*}} '__builtin_hlsl_resource_getstride' 'void (...) noexcept'
+// CHECK-NEXT: MemberExpr {{.*}} '__hlsl_resource_t {{.*}}' lvalue .__handle {{.*}}
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'stride' 'unsigned int &__restrict'
+// CHECK-NEXT: AlwaysInlineAttr {{.*}} Implicit always_inline
+
// CHECK: ClassTemplateSpecializationDecl {{.*}} class [[RESOURCE]] definition
// CHECK: TemplateArgument type 'float'
// CHECK-NEXT: BuiltinType {{.*}} 'float'
diff --git a/clang/test/AST/HLSL/TypedBuffers-AST.hlsl b/clang/test/AST/HLSL/TypedBuffers-AST.hlsl
index 5182ce1..14e274d 100644
--- a/clang/test/AST/HLSL/TypedBuffers-AST.hlsl
+++ b/clang/test/AST/HLSL/TypedBuffers-AST.hlsl
@@ -214,6 +214,20 @@ RESOURCE<float> Buffer;
// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'Index' 'unsigned int'
// CHECK-NEXT: AlwaysInlineAttr {{.*}} Implicit always_inline
+// GetDimensions method
+
+// CHECK-NEXT: CXXMethodDecl {{.*}} GetDimensions 'void (out unsigned int)'
+// CHECK-NEXT: ParmVarDecl {{.*}} dim 'unsigned int &__restrict'
+// CHECK-NEXT: HLSLParamModifierAttr {{.*}} out
+// CHECK-NEXT: CompoundStmt
+// CHECK-NEXT: CallExpr {{.*}} 'void'
+// CHECK-NEXT: ImplicitCastExpr {{.*}} 'void (*)(...) noexcept' <BuiltinFnToFnPtr>
+// CHECK-NEXT: DeclRefExpr {{.*}} '<builtin fn type>' Function {{.*}} '__builtin_hlsl_resource_getdimensions_x' 'void (...) noexcept'
+// CHECK-NEXT: MemberExpr {{.*}} '__hlsl_resource_t {{.*}}' lvalue .__handle {{.*}}
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'dim' 'unsigned int &__restrict'
+// CHECK-NEXT: AlwaysInlineAttr {{.*}} Implicit always_inline
+
// CHECK: ClassTemplateSpecializationDecl {{.*}} class [[RESOURCE]] definition
// CHECK: TemplateArgument type 'float'
diff --git a/clang/test/Analysis/analyzer-stats/entry-point-stats.cpp b/clang/test/Analysis/analyzer-stats/entry-point-stats.cpp
index 2a0caad..ebbc015 100644
--- a/clang/test/Analysis/analyzer-stats/entry-point-stats.cpp
+++ b/clang/test/Analysis/analyzer-stats/entry-point-stats.cpp
@@ -8,9 +8,9 @@
// CHECK-NEXT: "c:@F@fib#i#": {
// CHECK-NEXT: "File": "{{.*}}entry-point-stats.cpp",
// CHECK-NEXT: "DebugName": "fib(unsigned int)",
+// CHECK-NEXT: "CFGSize": "5",
// CHECK-NEXT: "PathRunningTime": "{{[0-9]+}}",
// CHECK-NEXT: "MaxBugClassSize": "{{[0-9]+}}",
-// CHECK-NEXT: "MaxCFGSize": "{{[0-9]+}}",
// CHECK-NEXT: "MaxQueueSize": "{{[0-9]+}}",
// CHECK-NEXT: "MaxReachableSize": "{{[0-9]+}}",
// CHECK-NEXT: "MaxTimeSpentSolvingZ3Queries": "{{[0-9]+}}",
@@ -45,9 +45,9 @@
// CHECK-NEXT: "c:@F@main#I#**C#": {
// CHECK-NEXT: "File": "{{.*}}entry-point-stats.cpp",
// CHECK-NEXT: "DebugName": "main(int, char **)",
+// CHECK-NEXT: "CFGSize": "3",
// CHECK-NEXT: "PathRunningTime": "{{[0-9]+}}",
// CHECK-NEXT: "MaxBugClassSize": "{{[0-9]+}}",
-// CHECK-NEXT: "MaxCFGSize": "{{[0-9]+}}",
// CHECK-NEXT: "MaxQueueSize": "{{[0-9]+}}",
// CHECK-NEXT: "MaxReachableSize": "{{[0-9]+}}",
// CHECK-NEXT: "MaxTimeSpentSolvingZ3Queries": "{{[0-9]+}}",
diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp
index d7488bf..82add4b 100644
--- a/clang/test/CIR/CodeGen/array.cpp
+++ b/clang/test/CIR/CodeGen/array.cpp
@@ -123,7 +123,7 @@ void func() {
// CIR: %[[TMP:.*]] = cir.load{{.*}} %[[ELE_PTR]] : !cir.ptr<!s32i>, !s32i
// CIR" cir.store %[[TMP]], %[[INIT_2]] : !s32i, !cir.ptr<!s32i>
-// LLVM: define{{.*}} void @_Z4funcv()
+// LLVM: define{{.*}} void @_Z4funcv(){{.*}}
// LLVM-NEXT: %[[ARR:.*]] = alloca [10 x i32], i64 1, align 16
// LLVM-NEXT: %[[INIT:.*]] = alloca i32, i64 1, align 4
// LLVM-NEXT: %[[INIT_2:.*]] = alloca i32, i64 1, align 4
@@ -174,7 +174,7 @@ void func2() {
// CIR: cir.condition(%[[CMP]])
// CIR: }
-// LLVM: define{{.*}} void @_Z5func2v()
+// LLVM: define{{.*}} void @_Z5func2v(){{.*}}
// LLVM: %[[ARR:.*]] = alloca [2 x i32], i64 1, align 4
// LLVM: %[[TMP:.*]] = alloca ptr, i64 1, align 8
// LLVM: %[[ARR_PTR:.*]] = getelementptr i32, ptr %[[ARR]], i32 0
@@ -224,7 +224,7 @@ void func3() {
// CIR: %[[ELE_TMP:.*]] = cir.load{{.*}} %[[ELE_PTR]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.store{{.*}} %[[ELE_TMP]], %[[INIT]] : !s32i, !cir.ptr<!s32i>
-// LLVM: define{{.*}} void @_Z5func3v()
+// LLVM: define{{.*}} void @_Z5func3v(){{.*}}
// LLVM: %[[ARR:.*]] = alloca [2 x i32], i64 1, align 4
// LLVM: %[[IDX:.*]] = alloca i32, i64 1, align 4
// LLVM: %[[INIT:.*]] = alloca i32, i64 1, align 4
@@ -276,7 +276,7 @@ void func4() {
// CIR: %[[TMP:.*]] = cir.load{{.*}} %[[ELE_0]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.store{{.*}} %[[TMP]], %[[INIT]] : !s32i, !cir.ptr<!s32i>
-// LLVM: define{{.*}} void @_Z5func4v()
+// LLVM: define{{.*}} void @_Z5func4v(){{.*}}
// LLVM: %[[ARR:.*]] = alloca [2 x [1 x i32]], i64 1, align 4
// LLVM: %[[INIT:.*]] = alloca i32, i64 1, align 4
// LLVM: %[[ARR_PTR:.*]] = getelementptr [1 x i32], ptr %[[ARR]], i32 0
@@ -329,7 +329,7 @@ void func5() {
// CIR: cir.condition(%[[CMP]])
// CIR: }
-// LLVM: define{{.*}} void @_Z5func5v()
+// LLVM: define{{.*}} void @_Z5func5v(){{.*}}
// LLVM: %[[ARR:.*]] = alloca [2 x [1 x i32]], i64 1, align 4
// LLVM: %[[TMP:.*]] = alloca ptr, i64 1, align 8
// LLVM: %[[ARR_PTR:.*]] = getelementptr [1 x i32], ptr %[[ARR]], i32 0
@@ -372,7 +372,7 @@ void func6() {
// CIR: %[[V1:.*]] = cir.const #cir.int<5> : !s32i
// CIR: cir.store{{.*}} %[[V1]], %[[ELE_PTR]] : !s32i, !cir.ptr<!s32i>
-// LLVM: define{{.*}} void @_Z5func6v()
+// LLVM: define{{.*}} void @_Z5func6v(){{.*}}
// LLVM: %[[VAR:.*]] = alloca i32, i64 1, align 4
// LLVM: %[[ARR:.*]] = alloca [2 x i32], i64 1, align 4
// LLVM: store i32 4, ptr %[[VAR]], align 4
@@ -414,7 +414,7 @@ void func7() {
// CIR: cir.condition(%[[CMP]])
// CIR: }
-// LLVM: define{{.*}} void @_Z5func7v()
+// LLVM: define{{.*}} void @_Z5func7v(){{.*}}
// LLVM: %[[ARR:.*]] = alloca [1 x ptr], i64 1, align 8
// LLVM: %[[TMP:.*]] = alloca ptr, i64 1, align 8
// LLVM: %[[ARR_PTR:.*]] = getelementptr ptr, ptr %[[ARR]], i32 0
@@ -458,7 +458,7 @@ void func8(int arr[10]) {
// CIR: %[[TMP_4:.*]] = cir.load{{.*}} %[[ELE_1]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.store{{.*}} %[[TMP_4]], %[[INIT_2]] : !s32i, !cir.ptr<!s32i>
-// LLVM: define{{.*}} void @_Z5func8Pi(ptr %[[ARG:.*]])
+// LLVM: define{{.*}} void @_Z5func8Pi(ptr %[[ARG:.*]]){{.*}}
// LLVM: %[[ARR:.*]] = alloca ptr, i64 1, align 8
// LLVM: %[[INIT:.*]] = alloca i32, i64 1, align 4
// LLVM: %[[INIT_2:.*]] = alloca i32, i64 1, align 4
@@ -502,7 +502,7 @@ void func9(int arr[10][5]) {
// CIR: %[[TMP_2:.*]] = cir.load{{.*}} %[[ARR_1_2]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.store{{.*}} %[[TMP_2]], %[[INIT]] : !s32i, !cir.ptr<!s32i>
-// LLVM: define{{.*}} void @_Z5func9PA5_i(ptr %[[ARG:.*]])
+// LLVM: define{{.*}} void @_Z5func9PA5_i(ptr %[[ARG:.*]]){{.*}}
// LLVM: %[[ARR:.*]] = alloca ptr, i64 1, align 8
// LLVM: %[[INIT:.*]] = alloca i32, i64 1, align 4
// LLVM: store ptr %[[ARG]], ptr %[[ARR]], align 8
@@ -536,7 +536,7 @@ void func10(int *a) {
// CIR: %[[TMP_2:.*]] = cir.load{{.*}} %[[ELE]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.store{{.*}} %[[TMP_2]], %[[INIT]] : !s32i, !cir.ptr<!s32i>
-// LLVM: define{{.*}} void @_Z6func10Pi(ptr %[[ARG:.*]]) {
+// LLVM: define{{.*}} void @_Z6func10Pi(ptr %[[ARG:.*]]){{.*}} {
// LLVM: %[[ARR:.*]] = alloca ptr, i64 1, align 8
// LLVM: %[[INIT:.*]] = alloca i32, i64 1, align 4
// LLVM: store ptr %[[ARG]], ptr %[[ARR]], align 8
diff --git a/clang/test/CIR/CodeGen/assign-operator.cpp b/clang/test/CIR/CodeGen/assign-operator.cpp
index 1089d4b..66d4b48 100644
--- a/clang/test/CIR/CodeGen/assign-operator.cpp
+++ b/clang/test/CIR/CodeGen/assign-operator.cpp
@@ -20,7 +20,7 @@ void a() {
// CIR: %[[ONE_CAST:.*]] = cir.cast integral %[[ONE]] : !u32i -> !s32i
// CIR: %[[RET:.*]] = cir.call @_ZN1xaSEi(%[[A_ADDR]], %[[ONE_CAST]]) : (!cir.ptr<!rec_x>, !s32i) -> !s32i
-// LLVM: define{{.*}} @_Z1av()
+// LLVM: define{{.*}} @_Z1av(){{.*}}
// OGCG: define{{.*}} @_Z1av()
void f(int i, int j) {
@@ -121,7 +121,7 @@ void copy_ref_to_ref(E &e1, E &e2) {
// CIR: %[[D1_REF_2:.*]] = cir.call @_ZN1DaSERKS_(%[[D1_REF]], %[[D2_REF]])
// CIR: cir.return
-// LLVM: define{{.*}} void @_Z15copy_ref_to_refR1ES0_(ptr %[[ARG0:.*]], ptr %[[ARG1:.*]]) {
+// LLVM: define{{.*}} void @_Z15copy_ref_to_refR1ES0_(ptr %[[ARG0:.*]], ptr %[[ARG1:.*]]){{.*}} {
// LLVM: %[[E1_ADDR:.*]] = alloca ptr
// LLVM: %[[E2_ADDR:.*]] = alloca ptr
// LLVM: store ptr %[[ARG0]], ptr %[[E1_ADDR]]
diff --git a/clang/test/CIR/CodeGen/binassign.c b/clang/test/CIR/CodeGen/binassign.c
index 65bea4d..dab9879 100644
--- a/clang/test/CIR/CodeGen/binassign.c
+++ b/clang/test/CIR/CodeGen/binassign.c
@@ -17,7 +17,7 @@ void binary_assign(void) {
i = 42;
}
-// CIR-LABEL: cir.func{{.*}} @binary_assign() {
+// CIR-LABEL: cir.func{{.*}} @binary_assign()
// CIR: %[[B:.*]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["b"]
// CIR: %[[C:.*]] = cir.alloca !s8i, !cir.ptr<!s8i>, ["c"]
// CIR: %[[F:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["f"]
@@ -33,7 +33,7 @@ void binary_assign(void) {
// CIR: cir.store{{.*}} %[[INT_VAL]], %[[I]] : !s32i, !cir.ptr<!s32i>
// CIR: cir.return
-// LLVM-LABEL: define {{.*}}void @binary_assign() {
+// LLVM-LABEL: define {{.*}}void @binary_assign(){{.*}} {
// LLVM: %[[B_PTR:.*]] = alloca i8
// LLVM: %[[C_PTR:.*]] = alloca i8
// LLVM: %[[F_PTR:.*]] = alloca float
diff --git a/clang/test/CIR/CodeGen/bitfields_be.c b/clang/test/CIR/CodeGen/bitfields_be.c
index 77741ba..3e1f054 100644
--- a/clang/test/CIR/CodeGen/bitfields_be.c
+++ b/clang/test/CIR/CodeGen/bitfields_be.c
@@ -27,7 +27,7 @@ int init(S* s) {
//CIR: [[TMP2:%.*]] = cir.get_member [[TMP1]][0] {name = "c"} : !cir.ptr<!rec_S> -> !cir.ptr<!u32i>
//CIR: [[TMP3:%.*]] = cir.get_bitfield align(4) (#bfi_c, [[TMP2]] : !cir.ptr<!u32i>) -> !s32i
-//LLVM: define dso_local i32 @init(ptr %0) {
+//LLVM: define dso_local i32 @init(ptr %0){{.*}} {
//LLVM: [[TMP0:%.*]] = alloca ptr, i64 1, align 8
//LLVM: [[TMP1:%.*]] = alloca i32, i64 1, align 4
//LLVM: [[TMP2:%.*]] = load ptr, ptr [[TMP0]], align 8
@@ -59,7 +59,7 @@ void load(S* s) {
// CIR: %[[GET0:.*]] = cir.get_member %[[VAL0]][0] {name = "a"} : !cir.ptr<!rec_S> -> !cir.ptr<!u32i>
// CIR: %[[SET0:.*]] = cir.set_bitfield align(4) (#bfi_a, %[[GET0]] : !cir.ptr<!u32i>, %[[MIN1]] : !s32i) -> !s32i
-// LLVM: define dso_local void @load
+// LLVM: define dso_local void @load{{.*}}
// LLVM: %[[PTR0:.*]] = load ptr
// LLVM: %[[GET0:.*]] = getelementptr %struct.S, ptr %[[PTR0]], i32 0, i32 0
// LLVM: %[[VAL0:.*]] = load i32, ptr %[[GET0]], align 4
diff --git a/clang/test/CIR/CodeGen/builtin_call.cpp b/clang/test/CIR/CodeGen/builtin_call.cpp
index a30df97..a08a784 100644
--- a/clang/test/CIR/CodeGen/builtin_call.cpp
+++ b/clang/test/CIR/CodeGen/builtin_call.cpp
@@ -82,7 +82,7 @@ void library_builtins() {
__builtin_abort();
}
-// CIR: cir.func{{.*}} @_Z16library_builtinsv() {
+// CIR: cir.func{{.*}} @_Z16library_builtinsv()
// CIR: %[[NULL:.+]] = cir.const #cir.ptr<null> : !cir.ptr<!s8i>
// CIR: cir.call @printf(%[[NULL]]) nothrow : (!cir.ptr<!s8i>) -> !s32i
// CIR: cir.call @abort() nothrow : () -> ()
diff --git a/clang/test/CIR/CodeGen/builtin_printf.cpp b/clang/test/CIR/CodeGen/builtin_printf.cpp
index 898984a..7200df1 100644
--- a/clang/test/CIR/CodeGen/builtin_printf.cpp
+++ b/clang/test/CIR/CodeGen/builtin_printf.cpp
@@ -20,7 +20,7 @@ void func(char const * const str, int i) {
// CIR: cir.func{{.*}} @printf(!cir.ptr<!s8i>, ...) -> !s32i
-// CIR: cir.func{{.*}} @_Z4funcPKci(%[[arg0:.+]]: !cir.ptr<!s8i>{{.*}}, %[[arg1:.+]]: !s32i{{.*}}) {
+// CIR: cir.func{{.*}} @_Z4funcPKci(%[[arg0:.+]]: !cir.ptr<!s8i>{{.*}}, %[[arg1:.+]]: !s32i
// CIR: %[[str_ptr:.+]] = cir.alloca !cir.ptr<!s8i>, !cir.ptr<!cir.ptr<!s8i>>, ["str", init, const]
// CIR: %[[i_ptr:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["i", init]
// CIR: cir.store %[[arg0]], %[[str_ptr]] : !cir.ptr<!s8i>, !cir.ptr<!cir.ptr<!s8i>>
diff --git a/clang/test/CIR/CodeGen/call.c b/clang/test/CIR/CodeGen/call.c
index 9d516c6..d780e37 100644
--- a/clang/test/CIR/CodeGen/call.c
+++ b/clang/test/CIR/CodeGen/call.c
@@ -16,11 +16,11 @@ void f2(void) {
f1(s);
}
-// CIR-LABEL: cir.func{{.*}} @f2()
+// CIR-LABEL: cir.func{{.*}} @f2(){{.*}} {
// CIR: %[[S:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!rec_S>, !rec_S
// CIR-NEXT: cir.call @f1(%[[S]]) : (!rec_S) -> ()
-// LLVM-LABEL: define{{.*}} void @f2()
+// LLVM-LABEL: define{{.*}} void @f2(){{.*}}
// LLVM: %[[S:.+]] = load %struct.S, ptr %{{.+}}, align 4
// LLVM-NEXT: call void @f1(%struct.S %[[S]])
@@ -33,11 +33,11 @@ void f4(void) {
struct S s = f3();
}
-// CIR-LABEL: cir.func{{.*}} @f4() {
+// CIR-LABEL: cir.func{{.*}} @f4(){{.*}} {
// CIR: %[[S:.+]] = cir.call @f3() : () -> !rec_S
// CIR-NEXT: cir.store align(4) %[[S]], %{{.+}} : !rec_S, !cir.ptr<!rec_S>
-// LLVM-LABEL: define{{.*}} void @f4() {
+// LLVM-LABEL: define{{.*}} void @f4(){{.*}} {
// LLVM: %[[S:.+]] = call %struct.S @f3()
// LLVM-NEXT: store %struct.S %[[S]], ptr %{{.+}}, align 4
@@ -57,11 +57,11 @@ void f7(void) {
f5(b);
}
-// CIR-LABEL: cir.func{{.*}} @f7()
+// CIR-LABEL: cir.func{{.*}} @f7(){{.*}} {
// CIR: %[[B:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!rec_Big>, !rec_Big
// CIR-NEXT: cir.call @f5(%[[B]]) : (!rec_Big) -> ()
-// LLVM-LABEL: define{{.*}} void @f7() {
+// LLVM-LABEL: define{{.*}} void @f7(){{.*}} {
// LLVM: %[[B:.+]] = load %struct.Big, ptr %{{.+}}, align 4
// LLVM-NEXT: call void @f5(%struct.Big %[[B]])
@@ -73,11 +73,11 @@ void f8(void) {
struct Big b = f6();
}
-// CIR-LABEL: cir.func{{.*}} @f8()
+// CIR-LABEL: cir.func{{.*}} @f8(){{.*}} {
// CIR: %[[B:.+]] = cir.call @f6() : () -> !rec_Big
// CIR: cir.store align(4) %[[B]], %{{.+}} : !rec_Big, !cir.ptr<!rec_Big>
-// LLVM-LABEL: define{{.*}} void @f8() {
+// LLVM-LABEL: define{{.*}} void @f8(){{.*}} {
// LLVM: %[[B:.+]] = call %struct.Big @f6()
// LLVM-NEXT: store %struct.Big %[[B]], ptr %{{.+}}, align 4
@@ -89,14 +89,14 @@ void f9(void) {
f1(f3());
}
-// CIR-LABEL: cir.func{{.*}} @f9()
+// CIR-LABEL: cir.func{{.*}} @f9(){{.*}} {
// CIR: %[[SLOT:.+]] = cir.alloca !rec_S, !cir.ptr<!rec_S>, ["agg.tmp0"] {alignment = 4 : i64}
// CIR-NEXT: %[[RET:.+]] = cir.call @f3() : () -> !rec_S
// CIR-NEXT: cir.store align(4) %[[RET]], %[[SLOT]] : !rec_S, !cir.ptr<!rec_S>
// CIR-NEXT: %[[ARG:.+]] = cir.load align(4) %[[SLOT]] : !cir.ptr<!rec_S>, !rec_S
// CIR-NEXT: cir.call @f1(%[[ARG]]) : (!rec_S) -> ()
-// LLVM-LABEL: define{{.*}} void @f9() {
+// LLVM-LABEL: define{{.*}} void @f9(){{.*}} {
// LLVM: %[[SLOT:.+]] = alloca %struct.S, i64 1, align 4
// LLVM-NEXT: %[[RET:.+]] = call %struct.S @f3()
// LLVM-NEXT: store %struct.S %[[RET]], ptr %[[SLOT]], align 4
@@ -116,13 +116,13 @@ int f12(void) {
return f10(1) + f11(2);
}
-// CIR-LABEL: cir.func{{.*}} @f12() -> !s32i
+// CIR-LABEL: cir.func{{.*}} @f12() -> !s32i{{.*}} {
// CIR: %[[A:.+]] = cir.const #cir.int<1> : !s32i
// CIR-NEXT: %{{.+}} = cir.call @f10(%[[A]]) side_effect(pure) : (!s32i) -> !s32i
// CIR-NEXT: %[[B:.+]] = cir.const #cir.int<2> : !s32i
// CIR-NEXT: %{{.+}} = cir.call @f11(%[[B]]) side_effect(const) : (!s32i) -> !s32i
-// LLVM-LABEL: define{{.*}} i32 @f12()
+// LLVM-LABEL: define{{.*}} i32 @f12(){{.*}}
// LLVM: %{{.+}} = call i32 @f10(i32 1) #[[ATTR0:.+]]
// LLVM-NEXT: %{{.+}} = call i32 @f11(i32 2) #[[ATTR1:.+]]
diff --git a/clang/test/CIR/CodeGen/call.cpp b/clang/test/CIR/CodeGen/call.cpp
index 3e8cfc1..affa8af 100644
--- a/clang/test/CIR/CodeGen/call.cpp
+++ b/clang/test/CIR/CodeGen/call.cpp
@@ -12,7 +12,7 @@ void f2() {
// CIR-LABEL: cir.func{{.*}} @_Z2f2v
// CIR: cir.call @_Z2f1v() : () -> ()
-// LLVM-LABEL: define{{.*}} void @_Z2f2v() {
+// LLVM-LABEL: define{{.*}} void @_Z2f2v(){{.*}} {
// LLVM: call void @_Z2f1v()
int f3() { return 2; }
@@ -25,7 +25,7 @@ int f4() {
// CIR-LABEL: cir.func{{.*}} @_Z2f4v() -> !s32i
// CIR: cir.call @_Z2f3v() : () -> !s32i
-// LLVM-LABEL: define{{.*}} i32 @_Z2f4v() {
+// LLVM-LABEL: define{{.*}} i32 @_Z2f4v(){{.*}} {
// LLVM: %{{.+}} = call i32 @_Z2f3v()
int f5(int a, int *b, bool c);
@@ -40,7 +40,7 @@ int f6() {
// CIR-NEXT: %[[#c:]] = cir.const #false
// CIR-NEXT: %{{.+}} = cir.call @_Z2f5iPib(%[[#a]], %[[#b:]], %[[#c]]) : (!s32i, !cir.ptr<!s32i>, !cir.bool) -> !s32i
-// LLVM-LABEL: define{{.*}} i32 @_Z2f6v() {
+// LLVM-LABEL: define{{.*}} i32 @_Z2f6v(){{.*}} {
// LLVM: %{{.+}} = call i32 @_Z2f5iPib(i32 2, ptr %{{.+}}, i1 false)
int f7(int (*ptr)(int, int)) {
@@ -67,7 +67,7 @@ void f9() {
// CIR: cir.call @_Z2f8iz(%{{.+}}) : (!s32i) -> ()
// CIR: cir.call @_Z2f8iz(%{{.+}}, %{{.+}}, %{{.+}}, %{{.+}}) : (!s32i, !s32i, !s32i, !s32i) -> ()
-// LLVM-LABEL: define{{.*}} void @_Z2f9v()
+// LLVM-LABEL: define{{.*}} void @_Z2f9v(){{.*}}
// LLVM: call void (i32, ...) @_Z2f8iz(i32 1)
// LLVM: call void (i32, ...) @_Z2f8iz(i32 1, i32 2, i32 3, i32 4)
@@ -85,7 +85,7 @@ void f11() {
// CIR: %[[#s:]] = cir.call @_Z3f10v() : () -> !rec_S
// CIR-NEXT: cir.store align(4) %[[#s]], %{{.+}} : !rec_S, !cir.ptr<!rec_S>
-// LLVM-LABEL: define{{.*}} void @_Z3f11v()
+// LLVM-LABEL: define{{.*}} void @_Z3f11v(){{.*}}
// LLVM: %[[#s:]] = call %struct.S @_Z3f10v()
// LLVM-NEXT: store %struct.S %[[#s]], ptr %{{.+}}, align 4
@@ -98,7 +98,7 @@ void f12() {
// CIR-NEXT: %[[#ret:]] = cir.call @_Z3f10v() : () -> !rec_S
// CIR-NEXT: cir.store align(4) %[[#ret]], %[[#slot]] : !rec_S, !cir.ptr<!rec_S>
-// LLVM-LABEL: define{{.*}} void @_Z3f12v() {
+// LLVM-LABEL: define{{.*}} void @_Z3f12v(){{.*}} {
// LLVM: %[[#slot:]] = alloca %struct.S, i64 1, align 4
// LLVM-NEXT: %[[#ret:]] = call %struct.S @_Z3f10v()
// LLVM-NEXT: store %struct.S %[[#ret]], ptr %[[#slot]], align 4
@@ -112,7 +112,7 @@ void f14() {
// CIR: cir.call @_Z3f13v() nothrow : () -> ()
// CIR: }
-// LLVM-LABEL: define{{.+}} void @_Z3f14v()
+// LLVM-LABEL: define{{.+}} void @_Z3f14v(){{.*}}
// LLVM: call void @_Z3f13v() #[[LLVM_ATTR_0:.+]]
// LLVM: }
@@ -126,7 +126,7 @@ void f16() {
// CIR-NEXT: %{{.+}} = cir.call @_Z3f15v() : () -> !s32i
// CIR: }
-// LLVM-LABEL: define{{.+}} void @_Z3f16v() {
+// LLVM-LABEL: define{{.+}} void @_Z3f16v(){{.*}} {
// LLVM-NEXT: %{{.+}} = call i32 @_Z3f15v()
// LLVM: }
diff --git a/clang/test/CIR/CodeGen/cmp.cpp b/clang/test/CIR/CodeGen/cmp.cpp
index 7e32d16..1871f94 100644
--- a/clang/test/CIR/CodeGen/cmp.cpp
+++ b/clang/test/CIR/CodeGen/cmp.cpp
@@ -45,7 +45,7 @@ void c0(int a, int b) {
// CIR: %[[B6:.*]] = cir.load{{.*}} %[[B_PTR]]
// CIR: %{{.*}} = cir.cmp(eq, %[[A6]], %[[B6]]) : !s32i, !cir.bool
-// LLVM-LABEL: define{{.*}} void @_Z2c0ii(i32 %0, i32 %1) {
+// LLVM-LABEL: define{{.*}} void @_Z2c0ii(i32 %0, i32 %1){{.*}} {
// LLVM: %[[PTR1:.*]] = alloca i32, i64 1
// LLVM: %[[PTR2:.*]] = alloca i32, i64 1
// LLVM: %[[BOOL_PTR:.*]] = alloca i8, i64 1
@@ -170,7 +170,7 @@ void c0_unsigned(unsigned int a, unsigned int b) {
// CIR: %[[UB6:.*]] = cir.load{{.*}} %[[U_B_PTR]]
// CIR: %{{.*}} = cir.cmp(eq, %[[UA6]], %[[UB6]]) : !u32i, !cir.bool
-// LLVM-LABEL: define{{.*}} void @_Z11c0_unsignedjj(i32 %0, i32 %1) {
+// LLVM-LABEL: define{{.*}} void @_Z11c0_unsignedjj(i32 %0, i32 %1){{.*}} {
// LLVM: %[[U_PTR1:.*]] = alloca i32, i64 1
// LLVM: %[[U_PTR2:.*]] = alloca i32, i64 1
// LLVM: %[[U_BOOL_PTR:.*]] = alloca i8, i64 1
@@ -265,7 +265,7 @@ void c0_float(float a, float b) {
x = a == b;
}
-// CIR-LABEL: cir.func{{.*}} @_Z8c0_floatff(%arg0: !cir.float{{.*}}, %arg1: !cir.float{{.*}}) {
+// CIR-LABEL: cir.func{{.*}} @_Z8c0_floatff(%arg0: !cir.float{{.*}}, %arg1: !cir.float{{.*}})
// CIR: %[[A_PTR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["a", init]
// CIR: %[[B_PTR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b", init]
// CIR: %[[X_PTR:.*]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["x", init]
@@ -303,7 +303,7 @@ void c0_float(float a, float b) {
// CIR: %[[CMP6:.*]] = cir.cmp(eq, %[[A6]], %[[B6]]) : !cir.float, !cir.bool
// CIR: cir.store{{.*}} %[[CMP6]], %[[X_PTR]] : !cir.bool, !cir.ptr<!cir.bool>
-// LLVM-LABEL: define{{.*}} void @_Z8c0_floatff(float %0, float %1) {
+// LLVM-LABEL: define{{.*}} void @_Z8c0_floatff(float %0, float %1){{.*}} {
// LLVM: %[[A_PTR:.*]] = alloca float
// LLVM: %[[B_PTR:.*]] = alloca float
// LLVM: store float %0, ptr %[[A_PTR]]
@@ -346,7 +346,7 @@ void pointer_cmp(int *a, int *b) {
x = a != b;
}
-// CIR-LABEL: cir.func{{.*}} @_Z11pointer_cmpPiS_(%arg0: !cir.ptr<!s32i>{{.*}}, %arg1: !cir.ptr<!s32i>{{.*}}) {
+// CIR-LABEL: cir.func{{.*}} @_Z11pointer_cmpPiS_(%arg0: !cir.ptr<!s32i>{{.*}}, %arg1: !cir.ptr<!s32i>{{.*}}){{.*}} {
// CIR: %[[A_PTR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["a", init]
// CIR: %[[B_PTR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["b", init]
@@ -360,7 +360,7 @@ void pointer_cmp(int *a, int *b) {
// CIR: cir.cmp(eq, {{.*}}, {{.*}}) : !cir.ptr<!s32i>, !cir.bool
// CIR: cir.cmp(ne, {{.*}}, {{.*}}) : !cir.ptr<!s32i>, !cir.bool
-// LLVM-LABEL: define{{.*}} void @_Z11pointer_cmpPiS_(ptr %0, ptr %1) {
+// LLVM-LABEL: define{{.*}} void @_Z11pointer_cmpPiS_(ptr %0, ptr %1){{.*}} {
// LLVM: %[[A_PTR:.*]] = alloca ptr
// LLVM: %[[B_PTR:.*]] = alloca ptr
// LLVM: store ptr %0, ptr %[[A_PTR]]
@@ -401,7 +401,7 @@ void bool_cmp(bool a, bool b) {
x = a != b;
}
-// CIR-LABEL: cir.func{{.*}} @_Z8bool_cmpbb(%arg0: !cir.bool{{.*}}, %arg1: !cir.bool{{.*}}) {
+// CIR-LABEL: cir.func{{.*}} @_Z8bool_cmpbb(%arg0: !cir.bool{{.*}}, %arg1: !cir.bool{{.*}}){{.*}} {
// CIR: %[[A_PTR:.*]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["a", init]
// CIR: %[[B_PTR:.*]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["b", init]
// CIR: %[[X_PTR:.*]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["x", init]
@@ -419,7 +419,7 @@ void bool_cmp(bool a, bool b) {
// CIR: cir.cmp(eq
// CIR: cir.cmp(ne
-// LLVM-LABEL: define{{.*}} void @_Z8bool_cmpbb(i1 %0, i1 %1) {
+// LLVM-LABEL: define{{.*}} void @_Z8bool_cmpbb(i1 %0, i1 %1){{.*}} {
// LLVM: %[[A_PTR:.*]] = alloca i8
// LLVM: %[[B_PTR:.*]] = alloca i8
// LLVM: %[[X_PTR:.*]] = alloca i8
diff --git a/clang/test/CIR/CodeGen/comma.c b/clang/test/CIR/CodeGen/comma.c
index cc26a3f..c0bc442 100644
--- a/clang/test/CIR/CodeGen/comma.c
+++ b/clang/test/CIR/CodeGen/comma.c
@@ -16,7 +16,7 @@ void comma(void) {
i = 100, 200;
}
-// CIR-LABEL: cir.func{{.*}} @comma() {
+// CIR-LABEL: cir.func{{.*}} @comma()
// CIR: %[[B:.*]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["b"]
// CIR: %[[C:.*]] = cir.alloca !s8i, !cir.ptr<!s8i>, ["c"]
// CIR: %[[F:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["f"]
@@ -34,7 +34,7 @@ void comma(void) {
// CIR: cir.store{{.*}} %[[HUNDRED]], %[[I]] : !s32i, !cir.ptr<!s32i>
// CIR: cir.return
-// LLVM-LABEL: define {{.*}}void @comma() {
+// LLVM-LABEL: define {{.*}}void @comma(){{.*}} {
// LLVM: %[[B_PTR:.*]] = alloca i8
// LLVM: %[[C_PTR:.*]] = alloca i8
// LLVM: %[[F_PTR:.*]] = alloca float
diff --git a/clang/test/CIR/CodeGen/ctor.cpp b/clang/test/CIR/CodeGen/ctor.cpp
index 2b06bb0..238e415 100644
--- a/clang/test/CIR/CodeGen/ctor.cpp
+++ b/clang/test/CIR/CodeGen/ctor.cpp
@@ -49,7 +49,7 @@ void bar() {
// CHECK: cir.func{{.*}} @_ZN13VariadicStrukC1Eiz(%arg0: !cir.ptr<!rec_VariadicStruk>
// CHECK-SAME: %arg1: !s32i
-// CHECK-SAME: ...) {
+// CHECK-SAME: ...){{.*}} {
// CHECK-NEXT: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init]
// CHECK-NEXT: %[[N_ADDR:.*]] = cir.alloca {{.*}} ["n", init]
// CHECK-NEXT: cir.store %arg0, %[[THIS_ADDR]]
diff --git a/clang/test/CIR/CodeGen/dtors.cpp b/clang/test/CIR/CodeGen/dtors.cpp
index 7fb0975..cb3886b 100644
--- a/clang/test/CIR/CodeGen/dtors.cpp
+++ b/clang/test/CIR/CodeGen/dtors.cpp
@@ -17,7 +17,7 @@ void test_temporary_dtor() {
// CIR: %[[ALLOCA:.*]] = cir.alloca !rec_A, !cir.ptr<!rec_A>, ["agg.tmp0"]
// CIR: cir.call @_ZN1AD1Ev(%[[ALLOCA]]) nothrow : (!cir.ptr<!rec_A>) -> ()
-// LLVM: define dso_local void @_Z19test_temporary_dtorv()
+// LLVM: define dso_local void @_Z19test_temporary_dtorv(){{.*}}
// LLVM: %[[ALLOCA:.*]] = alloca %struct.A, i64 1, align 1
// LLVM: call void @_ZN1AD1Ev(ptr %[[ALLOCA]])
@@ -55,7 +55,7 @@ bool test_temp_or() { return make_temp(1) || make_temp(2); }
// CIR: cir.yield %[[TERNARY]] : !cir.bool
// CIR: } : !cir.bool
-// LLVM: define{{.*}} i1 @_Z12test_temp_orv() {
+// LLVM: define{{.*}} i1 @_Z12test_temp_orv(){{.*}} {
// LLVM: %[[REF_TMP0:.*]] = alloca %struct.B
// LLVM: %[[REF_TMP1:.*]] = alloca %struct.B
// LLVM: br label %[[LOR_BEGIN:.*]]
@@ -125,7 +125,7 @@ bool test_temp_and() { return make_temp(1) && make_temp(2); }
// CIR: cir.yield %[[TERNARY]] : !cir.bool
// CIR: } : !cir.bool
-// LLVM: define{{.*}} i1 @_Z13test_temp_andv() {
+// LLVM: define{{.*}} i1 @_Z13test_temp_andv(){{.*}} {
// LLVM: %[[REF_TMP0:.*]] = alloca %struct.B
// LLVM: %[[REF_TMP1:.*]] = alloca %struct.B
// LLVM: br label %[[LAND_BEGIN:.*]]
@@ -199,7 +199,7 @@ void test_nested_dtor() {
// CIR: cir.func{{.*}} @_Z16test_nested_dtorv()
// CIR: cir.call @_ZN1DD2Ev(%{{.*}})
-// LLVM: define {{.*}} void @_Z16test_nested_dtorv()
+// LLVM: define {{.*}} void @_Z16test_nested_dtorv(){{.*}}
// LLVM: call void @_ZN1DD2Ev(ptr %{{.*}})
// OGCG: define {{.*}} void @_Z16test_nested_dtorv()
@@ -236,7 +236,7 @@ void test_base_dtor_call() {
// CIR: cir.func {{.*}} @_Z19test_base_dtor_callv()
// cir.call @_ZN1FD2Ev(%{{.*}}) nothrow : (!cir.ptr<!rec_F>) -> ()
-// LLVM: define {{.*}} void @_Z19test_base_dtor_callv()
+// LLVM: define {{.*}} void @_Z19test_base_dtor_callv(){{.*}}
// LLVM: call void @_ZN1FD2Ev(ptr %{{.*}})
// OGCG: define {{.*}} void @_Z19test_base_dtor_callv()
diff --git a/clang/test/CIR/CodeGen/inline-attributes.cpp b/clang/test/CIR/CodeGen/inline-attributes.cpp
new file mode 100644
index 0000000..fab4010
--- /dev/null
+++ b/clang/test/CIR/CodeGen/inline-attributes.cpp
@@ -0,0 +1,75 @@
+// RUN: %clang_cc1 -std=c++11 -triple x86_64-unknown-linux-gnu -O1 -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s
+// RUN: %clang_cc1 -std=c++11 -triple x86_64-unknown-linux-gnu -O1 -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --check-prefix=LLVM --input-file=%t-cir.ll %s
+// RUN: %clang_cc1 -std=c++11 -triple x86_64-unknown-linux-gnu -O1 -emit-llvm %s -o %t.ll
+// RUN: FileCheck --check-prefix=OGCG --input-file=%t.ll %s
+
+extern int global_var;
+
+__attribute__((always_inline)) inline int always_inline_function(int x) {
+ return x * 2 + global_var;
+}
+
+inline int inline_hint_function(int x) {
+ return x - 1 + global_var;
+}
+
+__attribute__((noinline)) int noinline_function(int x) {
+ return x / 2 + global_var;
+}
+
+int regular_function(int x) {
+ return x + 1 + global_var;
+}
+
+// Force emission of all functions with function pointers
+int (*always_inline_ptr)(int) = &always_inline_function;
+int (*inline_hint_ptr)(int) = &inline_hint_function;
+int (*noinline_ptr)(int) = &noinline_function;
+int (*regular_ptr)(int) = &regular_function;
+
+// CIR-LABEL: cir.func dso_local @_Z17noinline_functioni(%arg0: !s32i {{.*}}) -> !s32i inline(never)
+
+// CIR-LABEL: cir.func dso_local @_Z16regular_functioni(%arg0: !s32i {{.*}}) -> !s32i
+// CIR-NOT: inline(never)
+// CIR-NOT: inline(always)
+// CIR-NOT: inline(hint)
+// CIR-SAME: {
+
+// CIR-LABEL: cir.func {{.*}}@_Z22always_inline_functioni(%arg0: !s32i {{.*}}) -> !s32i inline(always)
+
+// CIR-LABEL: cir.func {{.*}}@_Z20inline_hint_functioni(%arg0: !s32i {{.*}}) -> !s32i inline(hint)
+
+// LLVM: ; Function Attrs:{{.*}} noinline
+// LLVM: define{{.*}} i32 @_Z17noinline_functioni
+
+// LLVM: ; Function Attrs:
+// LLVM-NOT: noinline
+// LLVM-NOT: alwaysinline
+// LLVM-NOT: inlinehint
+// LLVM-SAME: {{$}}
+// LLVM: define{{.*}} i32 @_Z16regular_functioni
+
+// LLVM: ; Function Attrs:{{.*}} alwaysinline
+// LLVM: define{{.*}} i32 @_Z22always_inline_functioni
+
+// LLVM: ; Function Attrs:{{.*}} inlinehint
+// LLVM: define{{.*}} i32 @_Z20inline_hint_functioni
+
+// OGCG: ; Function Attrs:{{.*}} noinline
+// OGCG: define{{.*}} i32 @_Z17noinline_functioni
+
+// OGCG: ; Function Attrs:
+// OGCG-NOT: noinline
+// OGCG-NOT: alwaysinline
+// OGCG-NOT: inlinehint
+// OGCG-SAME: {{$}}
+// OGCG: define{{.*}} i32 @_Z16regular_functioni
+
+// OGCG: ; Function Attrs:{{.*}} alwaysinline
+// OGCG: define{{.*}} i32 @_Z22always_inline_functioni
+
+// OGCG: ; Function Attrs:{{.*}} inlinehint
+// OGCG: define{{.*}} i32 @_Z20inline_hint_functioni
+
diff --git a/clang/test/CIR/CodeGen/label.c b/clang/test/CIR/CodeGen/label.c
index f5345ef..fd3c7f2 100644
--- a/clang/test/CIR/CodeGen/label.c
+++ b/clang/test/CIR/CodeGen/label.c
@@ -41,7 +41,7 @@ labelC:
// CIR: cir.label "labelC"
// CIR: cir.return
-// LLVM: define dso_local void @multiple_labels()
+// LLVM: define dso_local void @multiple_labels(){{.*}}
// LLVM: br label %1
// LLVM: 1:
// LLVM: br label %2
@@ -73,7 +73,7 @@ labelD:
// CIR: }
// CIR: cir.return
-// LLVM: define dso_local void @label_in_if
+// LLVM: define dso_local void @label_in_if{{.*}}
// LLVM: br label %3
// LLVM: 3:
// LLVM: [[LOAD:%.*]] = load i32, ptr [[COND:%.*]], align 4
@@ -115,7 +115,7 @@ void after_return() {
// CIR: cir.label "label"
// CIR: cir.br ^bb1
-// LLVM: define dso_local void @after_return
+// LLVM: define dso_local void @after_return{{.*}}
// LLVM: br label %1
// LLVM: 1:
// LLVM: ret void
@@ -139,7 +139,7 @@ void after_unreachable() {
// CIR: cir.label "label"
// CIR: cir.return
-// LLVM: define dso_local void @after_unreachable
+// LLVM: define dso_local void @after_unreachable{{.*}}
// LLVM: unreachable
// LLVM: 1:
// LLVM: ret void
@@ -188,7 +188,7 @@ void foo() {
// CIR: ^bb1:
// CIR: cir.label "label"
-// LLVM:define dso_local void @foo() {
+// LLVM: define dso_local void @foo(){{.*}} {
// LLVM: [[ALLOC:%.*]] = alloca %struct.S, i64 1, align 1
// LLVM: br label %2
// LLVM:2:
diff --git a/clang/test/CIR/CodeGen/lambda-static-invoker.cpp b/clang/test/CIR/CodeGen/lambda-static-invoker.cpp
index 15d768e..e7d199b 100644
--- a/clang/test/CIR/CodeGen/lambda-static-invoker.cpp
+++ b/clang/test/CIR/CodeGen/lambda-static-invoker.cpp
@@ -50,7 +50,7 @@ int g3() {
// CIR: %[[RET:.*]] = cir.load %[[RETVAL]]
// CIR: cir.return %[[RET]]
-// LLVM: define internal i32 @"_ZZ2g3vENK3$_0clERKi"(ptr %[[THIS_ARG:.*]], ptr %[[REF_I_ARG:.*]]) {
+// LLVM: define internal i32 @"_ZZ2g3vENK3$_0clERKi"(ptr %[[THIS_ARG:.*]], ptr %[[REF_I_ARG:.*]]){{.*}} {
// LLVM: %[[THIS_ALLOCA:.*]] = alloca ptr
// LLVM: %[[REF_I_ALLOCA:.*]] = alloca ptr
// LLVM: %[[RETVAL:.*]] = alloca i32
@@ -66,7 +66,7 @@ int g3() {
// In OGCG, the _ZZ2g3vENK3$_0clERKi function is emitted after _ZZ2g3vEN3$_08__invokeERKi, see below.
// lambda invoker
-// CIR: cir.func internal private dso_local @_ZZ2g3vEN3$_08__invokeERKi(%[[REF_I_ARG:.*]]: !cir.ptr<!s32i> {{.*}}) -> !s32i {
+// CIR: cir.func internal private dso_local @_ZZ2g3vEN3$_08__invokeERKi(%[[REF_I_ARG:.*]]: !cir.ptr<!s32i> {{.*}}) -> !s32i{{.*}} {
// CIR: %[[REF_I_ALLOCA:.*]] = cir.alloca {{.*}} ["i", init, const]
// CIR: %[[RETVAL:.*]] = cir.alloca {{.*}} ["__retval"]
// CIR: %[[LAM_ALLOCA:.*]] = cir.alloca ![[REC_LAM_G3]], !cir.ptr<![[REC_LAM_G3]]>, ["unused.capture"]
@@ -77,7 +77,7 @@ int g3() {
// CIR: %[[RET:.*]] = cir.load %[[RETVAL]]
// CIR: cir.return %[[RET]]
-// LLVM: define internal i32 @"_ZZ2g3vEN3$_08__invokeERKi"(ptr %[[REF_I_ARG:.*]]) {
+// LLVM: define internal i32 @"_ZZ2g3vEN3$_08__invokeERKi"(ptr %[[REF_I_ARG:.*]]){{.*}} {
// LLVM: %[[REF_I_ALLOCA:.*]] = alloca ptr
// LLVM: %[[RETVAL:.*]] = alloca i32
// LLVM: %[[LAM_ALLOCA:.*]] = alloca %[[REC_LAM_G3:.*]],
@@ -91,7 +91,7 @@ int g3() {
// In OGCG, the _ZZ2g3vEN3$_08__invokeERKi function is emitted after _ZN1A3barEv, see below.
// lambda operator int (*)(int const&)()
-// CIR: cir.func internal private dso_local @_ZZ2g3vENK3$_0cvPFiRKiEEv(%[[THIS_ARG:.*]]: !cir.ptr<![[REC_LAM_G3]]> {{.*}}) -> !cir.ptr<!cir.func<(!cir.ptr<!s32i>) -> !s32i>> {
+// CIR: cir.func internal private dso_local @_ZZ2g3vENK3$_0cvPFiRKiEEv(%[[THIS_ARG:.*]]: !cir.ptr<![[REC_LAM_G3]]> {{.*}}) -> !cir.ptr<!cir.func<(!cir.ptr<!s32i>) -> !s32i>>{{.*}} {
// CIR: %[[THIS_ALLOCA:.*]] = cir.alloca !cir.ptr<![[REC_LAM_G3]]>, !cir.ptr<!cir.ptr<![[REC_LAM_G3]]>>, ["this", init]
// CIR: %[[RETVAL:.*]] = cir.alloca !cir.ptr<!cir.func<(!cir.ptr<!s32i>) -> !s32i>>, !cir.ptr<!cir.ptr<!cir.func<(!cir.ptr<!s32i>) -> !s32i>>>, ["__retval"]
// CIR: cir.store %[[THIS_ARG]], %[[THIS_ALLOCA]]
@@ -101,7 +101,7 @@ int g3() {
// CIR: %[[RET:.*]] = cir.load %[[RETVAL]]
// CIR: cir.return %[[RET]]
-// LLVM: define internal ptr @"_ZZ2g3vENK3$_0cvPFiRKiEEv"(ptr %[[THIS_ARG:.*]]) {
+// LLVM: define internal ptr @"_ZZ2g3vENK3$_0cvPFiRKiEEv"(ptr %[[THIS_ARG:.*]]){{.*}} {
// LLVM: %[[THIS_ALLOCA:.*]] = alloca ptr
// LLVM: %[[RETVAL:.*]] = alloca ptr
// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ALLOCA]]
@@ -112,7 +112,7 @@ int g3() {
// In OGCG, the _ZZ2g3vENK3$_0cvPFiRKiEEv function is emitted just after the _Z2g3v function, see above.
-// CIR: cir.func{{.*}} @_Z2g3v() -> !s32i {
+// CIR: cir.func{{.*}} @_Z2g3v() -> !s32i{{.*}} {
// CIR: %[[RETVAL:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
// CIR: %[[FN_ADDR:.*]] = cir.alloca !cir.ptr<!cir.func<(!cir.ptr<!s32i>) -> !s32i>>, !cir.ptr<!cir.ptr<!cir.func<(!cir.ptr<!s32i>) -> !s32i>>>, ["fn", init]
// CIR: %[[TASK:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["task", init]
@@ -145,7 +145,7 @@ int g3() {
// CIR: cir.return %[[RET]]
// CIR: }
-// LLVM: define dso_local i32 @_Z2g3v() {
+// LLVM: define dso_local i32 @_Z2g3v(){{.*}} {
// LLVM: %[[LAM_ALLOCA:.*]] = alloca %[[REC_LAM_G3]]
// LLVM: %[[REF_TMP1:.*]] = alloca i32
// LLVM: %[[RETVAL:.*]] = alloca i32
diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp
index 033adc6..0c32ceb1 100644
--- a/clang/test/CIR/CodeGen/lambda.cpp
+++ b/clang/test/CIR/CodeGen/lambda.cpp
@@ -13,13 +13,13 @@ void fn() {
a();
}
-// CIR: cir.func lambda internal private dso_local @_ZZ2fnvENK3$_0clEv(%[[THIS_ARG:.*]]: !cir.ptr<![[REC_LAM_FN_A:.*]]> {{.*}})
+// CIR: cir.func lambda internal private dso_local @_ZZ2fnvENK3$_0clEv(%[[THIS_ARG:.*]]: !cir.ptr<![[REC_LAM_FN_A:.*]]> {{.*}}) {{.*}} {
// CIR: %[[THIS:.*]] = cir.alloca !cir.ptr<![[REC_LAM_FN_A]]>, !cir.ptr<!cir.ptr<![[REC_LAM_FN_A]]>>, ["this", init]
// CIR: cir.store %[[THIS_ARG]], %[[THIS]]
// CIR: cir.load %[[THIS]]
// CIR: cir.return
-// CIR: cir.func dso_local @_Z2fnv()
+// CIR: cir.func dso_local @_Z2fnv() {{.*}} {
// CIR: %[[A:.*]] = cir.alloca ![[REC_LAM_FN_A]], !cir.ptr<![[REC_LAM_FN_A]]>, ["a"]
// CIR: cir.call @_ZZ2fnvENK3$_0clEv(%[[A]])
@@ -52,7 +52,7 @@ void l0() {
a();
}
-// CIR: cir.func lambda internal private dso_local @_ZZ2l0vENK3$_0clEv(%[[THIS_ARG:.*]]: !cir.ptr<![[REC_LAM_L0_A:.*]]> {{.*}})
+// CIR: cir.func lambda internal private dso_local @_ZZ2l0vENK3$_0clEv(%[[THIS_ARG:.*]]: !cir.ptr<![[REC_LAM_L0_A:.*]]> {{.*}}) {{.*}} {
// CIR: %[[THIS_ADDR:.*]] = cir.alloca !cir.ptr<![[REC_LAM_L0_A]]>, !cir.ptr<!cir.ptr<![[REC_LAM_L0_A]]>>, ["this", init] {alignment = 8 : i64}
// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
// CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
@@ -66,7 +66,7 @@ void l0() {
// CIR: cir.store{{.*}} %[[I_PLUS_ONE]], %[[I_ADDR]]
// CIR: cir.return
-// CIR: cir.func {{.*}} @_Z2l0v()
+// CIR: cir.func {{.*}} @_Z2l0v() {{.*}} {
// CIR: %[[I:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["i"]
// CIR: %[[A:.*]] = cir.alloca ![[REC_LAM_L0_A]], !cir.ptr<![[REC_LAM_L0_A]]>, ["a", init]
// CIR: %[[I_ADDR:.*]] = cir.get_member %[[A]][0] {name = "i"}
@@ -124,7 +124,7 @@ auto g() {
};
}
-// CIR: cir.func dso_local @_Z1gv() -> ![[REC_LAM_G:.*]] {
+// CIR: cir.func dso_local @_Z1gv() -> ![[REC_LAM_G:.*]] {{.*}} {
// CIR: %[[RETVAL:.*]] = cir.alloca ![[REC_LAM_G]], !cir.ptr<![[REC_LAM_G]]>, ["__retval"]
// CIR: %[[I_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["i", init]
// CIR: %[[TWELVE:.*]] = cir.const #cir.int<12> : !s32i
@@ -166,7 +166,7 @@ auto g2() {
}
// Should be same as above because of NRVO
-// CIR: cir.func dso_local @_Z2g2v() -> ![[REC_LAM_G2:.*]] {
+// CIR: cir.func dso_local @_Z2g2v() -> ![[REC_LAM_G2:.*]] {{.*}} {
// CIR: %[[RETVAL:.*]] = cir.alloca ![[REC_LAM_G2]], !cir.ptr<![[REC_LAM_G2]]>, ["__retval", init]
// CIR: %[[I_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["i", init]
// CIR: %[[TWELVE:.*]] = cir.const #cir.int<12> : !s32i
@@ -199,7 +199,7 @@ int f() {
return g2()();
}
-// CIR:cir.func lambda internal private dso_local @_ZZ2g2vENK3$_0clEv(%[[THIS_ARG:.*]]: !cir.ptr<![[REC_LAM_G2]]> {{.*}}) -> !s32i
+// CIR:cir.func lambda internal private dso_local @_ZZ2g2vENK3$_0clEv(%[[THIS_ARG:.*]]: !cir.ptr<![[REC_LAM_G2]]> {{.*}}) -> !s32i {{.*}} {
// CIR: %[[THIS_ADDR:.*]] = cir.alloca !cir.ptr<![[REC_LAM_G2]]>, !cir.ptr<!cir.ptr<![[REC_LAM_G2]]>>, ["this", init]
// CIR: %[[RETVAL:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
@@ -217,7 +217,7 @@ int f() {
// CIR: %[[RET:.*]] = cir.load %[[RETVAL]]
// CIR: cir.return %[[RET]]
-// CIR: cir.func dso_local @_Z1fv() -> !s32i
+// CIR: cir.func dso_local @_Z1fv() -> !s32i {{.*}} {
// CIR: %[[RETVAL:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
// CIR: %[[SCOPE_RET:.*]] = cir.scope {
// CIR: %[[TMP:.*]] = cir.alloca ![[REC_LAM_G2]], !cir.ptr<![[REC_LAM_G2]]>, ["ref.tmp0"]
@@ -301,7 +301,7 @@ struct A {
// OGCG: call noundef i32 @_ZN1A3barEv(ptr {{.*}} %[[A_THIS]])
// lambda operator() in foo()
-// CIR: cir.func lambda comdat linkonce_odr @_ZZN1A3fooEvENKUlvE_clEv(%[[THIS_ARG:.*]]: !cir.ptr<![[REC_LAM_A:.*]]> {{.*}})
+// CIR: cir.func lambda comdat linkonce_odr @_ZZN1A3fooEvENKUlvE_clEv(%[[THIS_ARG:.*]]: !cir.ptr<![[REC_LAM_A:.*]]> {{.*}}) {{.*}} {
// CIR: %[[THIS_ADDR:.*]] = cir.alloca !cir.ptr<![[REC_LAM_A]]>, !cir.ptr<!cir.ptr<![[REC_LAM_A]]>>, ["this", init]
// CIR: %[[RETVAL:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
// CIR: cir.store{{.*}} %[[THIS_ARG]], %[[THIS_ADDR]]
@@ -328,7 +328,7 @@ struct A {
// The function above is defined after _ZN1A3barEv in OGCG, see below.
// A::foo()
-// CIR: cir.func {{.*}} @_ZN1A3fooEv(%[[THIS_ARG:.*]]: !cir.ptr<!rec_A> {{.*}}) -> !s32i
+// CIR: cir.func {{.*}} @_ZN1A3fooEv(%[[THIS_ARG:.*]]: !cir.ptr<!rec_A> {{.*}}) -> !s32i {{.*}} {
// CIR: %[[THIS_ADDR:.*]] = cir.alloca !cir.ptr<!rec_A>, !cir.ptr<!cir.ptr<!rec_A>>, ["this", init]
// CIR: %[[RETVAL:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
@@ -373,7 +373,7 @@ struct A {
// OGCG: ret i32 %[[LAM_RET]]
// lambda operator() in bar()
-// CIR: cir.func {{.*}} @_ZZN1A3barEvENKUlvE_clEv(%[[THIS_ARG2:.*]]: !cir.ptr<![[REC_LAM_PTR_A:.*]]> {{.*}}) -> !s32i
+// CIR: cir.func {{.*}} @_ZZN1A3barEvENKUlvE_clEv(%[[THIS_ARG2:.*]]: !cir.ptr<![[REC_LAM_PTR_A:.*]]> {{.*}}) -> !s32i {{.*}} {
// CIR: %[[THIS_ADDR:.*]] = cir.alloca !cir.ptr<![[REC_LAM_PTR_A]]>, !cir.ptr<!cir.ptr<![[REC_LAM_PTR_A]]>>, ["this", init]
// CIR: %[[RETVAL:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
// CIR: cir.store{{.*}} %[[THIS_ARG]], %[[THIS_ADDR]]
@@ -402,7 +402,7 @@ struct A {
// The function above is defined after _ZZN1A3fooEvENKUlvE_clEv in OGCG, see below.
// A::bar()
-// CIR: cir.func {{.*}} @_ZN1A3barEv(%[[THIS_ARG:.*]]: !cir.ptr<!rec_A> {{.*}}) -> !s32i
+// CIR: cir.func {{.*}} @_ZN1A3barEv(%[[THIS_ARG:.*]]: !cir.ptr<!rec_A> {{.*}}) -> !s32i {{.*}} {
// CIR: %[[THIS_ADDR:.*]] = cir.alloca !cir.ptr<!rec_A>, !cir.ptr<!cir.ptr<!rec_A>>, ["this", init]
// CIR: %[[RETVAL:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
@@ -472,7 +472,7 @@ int test_lambda_this1(){
return x+y;
}
-// CIR: cir.func {{.*}} @_Z17test_lambda_this1v
+// CIR: cir.func {{.*}} @_Z17test_lambda_this1v{{.*}} {
// CIR: cir.call @_ZN1AC1Ev(%[[A_THIS:.*]]){{.*}} : (!cir.ptr<!rec_A>) -> ()
// CIR: cir.call @_ZN1A3fooEv(%[[A_THIS]]){{.*}} : (!cir.ptr<!rec_A>) -> !s32i
// CIR: cir.call @_ZN1A3barEv(%[[A_THIS]]){{.*}} : (!cir.ptr<!rec_A>) -> !s32i
diff --git a/clang/test/CIR/CodeGen/linkage-spec.cpp b/clang/test/CIR/CodeGen/linkage-spec.cpp
index eb6c7b0..1affecd 100644
--- a/clang/test/CIR/CodeGen/linkage-spec.cpp
+++ b/clang/test/CIR/CodeGen/linkage-spec.cpp
@@ -1,42 +1,42 @@
// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - 2>&1 | FileCheck %s
extern "C" void TopLevelC(){}
-// CHECK: cir.func{{.*}} @TopLevelC() {
+// CHECK: cir.func dso_local @TopLevelC() inline(never) {
extern "C++" void TopLevelCpp(){}
-// CHECK: cir.func{{.*}} @_Z11TopLevelCppv() {
+// CHECK: cir.func dso_local @_Z11TopLevelCppv() inline(never) {
extern "C++" {
void ExternCppEmpty(){}
- // CHECK: cir.func{{.*}} @_Z14ExternCppEmptyv() {
+ // CHECK: cir.func dso_local @_Z14ExternCppEmptyv() inline(never) {
extern "C" void ExternCpp_C(){}
- // CHECK: cir.func{{.*}} @ExternCpp_C() {
+ // CHECK: cir.func dso_local @ExternCpp_C() inline(never) {
extern "C++" void ExternCpp_Cpp(){}
- // CHECK: cir.func{{.*}} @_Z13ExternCpp_Cppv() {
+ // CHECK: cir.func dso_local @_Z13ExternCpp_Cppv() inline(never) {
extern "C" {
void ExternCpp_CEmpty(){}
- // CHECK: cir.func{{.*}} @ExternCpp_CEmpty() {
+ // CHECK: cir.func dso_local @ExternCpp_CEmpty() inline(never) {
extern "C" void ExternCpp_C_C(){}
- // CHECK: cir.func{{.*}} @ExternCpp_C_C() {
+ // CHECK: cir.func dso_local @ExternCpp_C_C() inline(never) {
extern "C++" void ExternCpp_C_Cpp(){}
- // CHECK: cir.func{{.*}} @_Z15ExternCpp_C_Cppv() {
+ // CHECK: cir.func dso_local @_Z15ExternCpp_C_Cppv() inline(never) {
}
}
extern "C" {
void ExternCEmpty(){}
- // CHECK: cir.func{{.*}} @ExternCEmpty() {
+ // CHECK: cir.func dso_local @ExternCEmpty() inline(never) {
extern "C" void ExternC_C(){}
- // CHECK: cir.func{{.*}} @ExternC_C() {
+ // CHECK: cir.func dso_local @ExternC_C() inline(never) {
extern "C++" void ExternC_Cpp(){}
- // CHECK: cir.func{{.*}} @_Z11ExternC_Cppv() {
+ // CHECK: cir.func dso_local @_Z11ExternC_Cppv() inline(never) {
extern "C++" {
void ExternC_CppEmpty(){}
- // CHECK: cir.func{{.*}} @_Z16ExternC_CppEmptyv() {
+ // CHECK: cir.func dso_local @_Z16ExternC_CppEmptyv() inline(never) {
extern "C" void ExternC_Cpp_C(){}
- // CHECK: cir.func{{.*}} @ExternC_Cpp_C() {
+ // CHECK: cir.func dso_local @ExternC_Cpp_C() inline(never) {
extern "C++" void ExternC_Cpp_Cpp(){}
- // CHECK: cir.func{{.*}} @_Z15ExternC_Cpp_Cppv() {
+ // CHECK: cir.func dso_local @_Z15ExternC_Cpp_Cppv() inline(never) {
}
}
diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp
index b932f9d..3d28666 100644
--- a/clang/test/CIR/CodeGen/loop.cpp
+++ b/clang/test/CIR/CodeGen/loop.cpp
@@ -24,7 +24,7 @@ void l0() {
// CIR: cir.return
// CIR: }
-// LLVM: define{{.*}} void @_Z2l0v()
+// LLVM: define{{.*}} void @_Z2l0v(){{.*}}
// LLVM: br label %[[LABEL1:.*]]
// LLVM: [[LABEL1]]:
// LLVM: br label %[[LABEL2:.*]]
@@ -67,7 +67,7 @@ void l1() {
// CIR-NEXT: cir.return
// CIR-NEXT: }
-// LLVM: define{{.*}} void @_Z2l1v()
+// LLVM: define{{.*}} void @_Z2l1v(){{.*}}
// LLVM: %[[I:.*]] = alloca i32, i64 1, align 4
// LLVM: br label %[[LABEL1:.*]]
// LLVM: [[LABEL1]]:
@@ -117,7 +117,7 @@ void l2() {
// CIR-NEXT: cir.return
// CIR-NEXT: }
-// LLVM: define{{.*}} void @_Z2l2v()
+// LLVM: define{{.*}} void @_Z2l2v(){{.*}}
// LLVM: %[[I:.*]] = alloca i32, i64 1, align 4
// LLVM: br label %[[LABEL1:.*]]
// LLVM: [[LABEL1]]:
@@ -165,7 +165,7 @@ void l3() {
// CIR-NEXT: cir.return
// CIR-NEXT: }
-// LLVM: define{{.*}} void @_Z2l3v()
+// LLVM: define{{.*}} void @_Z2l3v(){{.*}}
// LLVM: %[[I:.*]] = alloca i32, i64 1, align 4
// LLVM: br label %[[LABEL1:.*]]
// LLVM: [[LABEL1]]:
@@ -231,7 +231,7 @@ void l4() {
// CIR: }
// CIR: }
-// LLVM: define{{.*}} void @_Z2l4v() {
+// LLVM: define{{.*}} void @_Z2l4v(){{.*}} {
// LLVM: %[[RANGE_ADDR:.*]] = alloca ptr
// LLVM: %[[BEGIN_ADDR:.*]] = alloca ptr
// LLVM: %[[END_ADDR:.*]] = alloca ptr
@@ -355,7 +355,7 @@ void l5() {
// CIR: }
// CIR: }
-// LLVM: define{{.*}} void @_Z2l5v() {
+// LLVM: define{{.*}} void @_Z2l5v(){{.*}} {
// LLVM: %[[ARR_ADDR:.*]] = alloca [4 x i32]
// LLVM: %[[RANGE_ADDR:.*]] = alloca ptr
// LLVM: %[[BEGIN_ADDR:.*]] = alloca ptr
@@ -448,7 +448,7 @@ void test_do_while_false() {
// CIR-NEXT: %[[FALSE:.*]] = cir.cast int_to_bool %[[ZERO]] : !s32i -> !cir.bool
// CIR-NEXT: cir.condition(%[[FALSE]])
-// LLVM: define{{.*}} void @_Z19test_do_while_falsev()
+// LLVM: define{{.*}} void @_Z19test_do_while_falsev(){{.*}}
// LLVM: br label %[[LABEL1:.*]]
// LLVM: [[LABEL1]]:
// LLVM: br label %[[LABEL3:.*]]
@@ -486,7 +486,7 @@ void test_empty_while_true() {
// CIR-NEXT: }
// CIR-NEXT: cir.yield
-// LLVM: define{{.*}} void @_Z21test_empty_while_truev()
+// LLVM: define{{.*}} void @_Z21test_empty_while_truev(){{.*}}
// LLVM: br label %[[LABEL1:.*]]
// LLVM: [[LABEL1]]:
// LLVM: br label %[[LABEL2:.*]]
@@ -539,7 +539,7 @@ void unreachable_after_continue() {
// CIR: cir.return
// CIR: }
-// LLVM: define{{.*}} void @_Z26unreachable_after_continuev()
+// LLVM: define{{.*}} void @_Z26unreachable_after_continuev(){{.*}}
// LLVM: %[[X:.*]] = alloca i32, i64 1, align 4
// LLVM: br label %[[LABEL1:.*]]
// LLVM: [[LABEL1]]:
@@ -599,7 +599,7 @@ void unreachable_after_break() {
// CIR: cir.return
// CIR: }
-// LLVM: define{{.*}} void @_Z23unreachable_after_breakv()
+// LLVM: define{{.*}} void @_Z23unreachable_after_breakv(){{.*}}
// LLVM: %[[X:.*]] = alloca i32, i64 1, align 4
// LLVM: br label %[[LABEL1:.*]]
// LLVM: [[LABEL1]]:
diff --git a/clang/test/CIR/CodeGen/member-functions.cpp b/clang/test/CIR/CodeGen/member-functions.cpp
index 8be2c7f..d46345db 100644
--- a/clang/test/CIR/CodeGen/member-functions.cpp
+++ b/clang/test/CIR/CodeGen/member-functions.cpp
@@ -19,7 +19,7 @@ void C::f() {}
void C::f2(int a, int b) {}
-// CIR: cir.func{{.*}} @_ZN1C2f2Eii(%[[THIS_ARG:.*]]: !cir.ptr<!rec_C> {{.*}}, %[[A_ARG:.*]]: !s32i {{.*}}, %[[B_ARG:.*]]: !s32i {{.*}}) {
+// CIR: cir.func{{.*}} @_ZN1C2f2Eii(%[[THIS_ARG:.*]]: !cir.ptr<!rec_C> {{.*}}, %[[A_ARG:.*]]: !s32i {{.*}}, %[[B_ARG:.*]]: !s32i {{.*}})
// CIR-NEXT: %[[THIS_ADDR:.*]] = cir.alloca !cir.ptr<!rec_C>, !cir.ptr<!cir.ptr<!rec_C>>, ["this", init]
// CIR-NEXT: %[[A_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init]
// CIR-NEXT: %[[B_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["b", init]
@@ -36,7 +36,7 @@ void test1() {
c.f2(1, 2);
}
-// CIR: cir.func{{.*}} @_Z5test1v() {
+// CIR: cir.func{{.*}} @_Z5test1v()
// CIR-NEXT: %[[C_ADDR:.*]] = cir.alloca !rec_C, !cir.ptr<!rec_C>, ["c"]
// CIR-NEXT: cir.call @_ZN1C1fEv(%[[C_ADDR]]) : (!cir.ptr<!rec_C>) -> ()
// CIR-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
diff --git a/clang/test/CIR/CodeGen/nrvo.cpp b/clang/test/CIR/CodeGen/nrvo.cpp
index 72c39d7..ce08c79 100644
--- a/clang/test/CIR/CodeGen/nrvo.cpp
+++ b/clang/test/CIR/CodeGen/nrvo.cpp
@@ -22,13 +22,13 @@ struct S f1() {
return s;
}
-// CIR: cir.func{{.*}} @_Z2f1v() -> !rec_S {
+// CIR: cir.func{{.*}} @_Z2f1v() -> !rec_S
// CIR-NEXT: %[[RETVAL:.*]] = cir.alloca !rec_S, !cir.ptr<!rec_S>, ["__retval", init]
// CIR-NEXT: cir.call @_ZN1SC1Ev(%[[RETVAL]]) : (!cir.ptr<!rec_S>) -> ()
// CIR-NEXT: %[[RET:.*]] = cir.load %[[RETVAL]] : !cir.ptr<!rec_S>, !rec_S
// CIR-NEXT: cir.return %[[RET]]
-// CIR-NOELIDE: cir.func{{.*}} @_Z2f1v() -> !rec_S {
+// CIR-NOELIDE: cir.func{{.*}} @_Z2f1v() -> !rec_S
// CIR-NOELIDE-NEXT: %[[RETVAL:.*]] = cir.alloca !rec_S, !cir.ptr<!rec_S>, ["__retval"]
// CIR-NOELIDE-NEXT: %[[S:.*]] = cir.alloca !rec_S, !cir.ptr<!rec_S>, ["s", init]
// CIR-NOELIDE-NEXT: cir.call @_ZN1SC1Ev(%[[S]]) : (!cir.ptr<!rec_S>) -> ()
diff --git a/clang/test/CIR/CodeGen/ternary.cpp b/clang/test/CIR/CodeGen/ternary.cpp
index eb38ee3..e7b7270 100644
--- a/clang/test/CIR/CodeGen/ternary.cpp
+++ b/clang/test/CIR/CodeGen/ternary.cpp
@@ -10,7 +10,7 @@ int x(int y) {
}
// CIR-LABEL: cir.func{{.*}} @_Z1xi(
-// CIR-SAME: %[[ARG0:.*]]: !s32i {{.*}}) -> !s32i {
+// CIR-SAME: %[[ARG0:.*]]: !s32i {{.*}}) -> !s32i
// CIR: [[Y:%.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["y", init] {alignment = 4 : i64}
// CIR: [[RETVAL:%.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"] {alignment = 4 : i64}
// CIR: cir.store %[[ARG0]], [[Y]] : !s32i, !cir.ptr<!s32i>
@@ -52,7 +52,7 @@ int foo(int a, int b) {
}
// CIR-LABEL: cir.func{{.*}} @_Z3fooii(
-// CIR-SAME: %[[ARG0:.*]]: !s32i {{.*}}, %[[ARG1:.*]]: !s32i {{.*}}) -> !s32i {
+// CIR-SAME: %[[ARG0:.*]]: !s32i {{.*}}, %[[ARG1:.*]]: !s32i {{.*}}) -> !s32i
// CIR: [[A:%.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init] {alignment = 4 : i64}
// CIR: [[B:%.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["b", init] {alignment = 4 : i64}
// CIR: [[RETVAL:%.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"] {alignment = 4 : i64}
diff --git a/clang/test/CIR/CodeGen/throws.cpp b/clang/test/CIR/CodeGen/throws.cpp
index 89cb072..53af1ef 100644
--- a/clang/test/CIR/CodeGen/throws.cpp
+++ b/clang/test/CIR/CodeGen/throws.cpp
@@ -196,3 +196,51 @@ void throw_ext_vector_type() {
// OGCG: store <4 x i32> %[[TMP_A]], ptr %[[EXCEPTION_ADDR]], align 16
// OGCG: call void @__cxa_throw(ptr %[[EXCEPTION_ADDR]], ptr @_ZTIDv4_i, ptr null)
// OGCG: unreachable
+
+void throw_enum_expr() {
+ enum Test {
+ TestA,
+ TestB
+ };
+ throw Test::TestA;
+}
+
+// CIR: %[[EXCEPTION_ADDR:.*]] = cir.alloc.exception 4 -> !cir.ptr<!u32i>
+// CIR: %[[EXCEPTION_VALUE:.*]] = cir.const #cir.int<0> : !u32i
+// CIR: cir.store{{.*}} %[[EXCEPTION_VALUE]], %[[EXCEPTION_ADDR]] : !u32i, !cir.ptr<!u32i>
+// CIR: cir.throw %[[EXCEPTION_ADDR]] : !cir.ptr<!u32i>, @_ZTIZ15throw_enum_exprvE4Test
+// CIR: cir.unreachable
+
+// LLVM: %[[EXCEPTION_ADDR:.*]] = call ptr @__cxa_allocate_exception(i64 4)
+// LLVM: store i32 0, ptr %[[EXCEPTION_ADDR]], align 16
+// LLVM: call void @__cxa_throw(ptr %[[EXCEPTION_ADDR]], ptr @_ZTIZ15throw_enum_exprvE4Test, ptr null)
+// LLVM: unreachable
+
+// OGCG: %[[EXCEPTION_ADDR:.*]] = call ptr @__cxa_allocate_exception(i64 4)
+// OGCG: store i32 0, ptr %[[EXCEPTION_ADDR]], align 16
+// OGCG: call void @__cxa_throw(ptr %[[EXCEPTION_ADDR]], ptr @_ZTIZ15throw_enum_exprvE4Test, ptr null)
+// OGCG: unreachable
+
+void throw_enum_class_expr() {
+ enum class Test {
+ TestA,
+ TestB
+ };
+ throw Test::TestA;
+}
+
+// CIR: %[[EXCEPTION_ADDR:.*]] = cir.alloc.exception 4 -> !cir.ptr<!s32i>
+// CIR: %[[EXCEPTION_VALUE:.*]] = cir.const #cir.int<0> : !s32i
+// CIR: cir.store{{.*}} %[[EXCEPTION_VALUE]], %[[EXCEPTION_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CIR: cir.throw %[[EXCEPTION_ADDR]] : !cir.ptr<!s32i>, @_ZTIZ21throw_enum_class_exprvE4Test
+// CIR: cir.unreachable
+
+// LLVM: %[[EXCEPTION_ADDR:.*]] = call ptr @__cxa_allocate_exception(i64 4)
+// LLVM: store i32 0, ptr %[[EXCEPTION_ADDR]], align 16
+// LLVM: call void @__cxa_throw(ptr %[[EXCEPTION_ADDR]], ptr @_ZTIZ21throw_enum_class_exprvE4Test, ptr null)
+// LLVM: unreachable
+
+// OGCG: %[[EXCEPTION_ADDR:.*]] = call ptr @__cxa_allocate_exception(i64 4)
+// OGCG: store i32 0, ptr %[[EXCEPTION_ADDR]], align 16
+// OGCG: call void @__cxa_throw(ptr %[[EXCEPTION_ADDR]], ptr @_ZTIZ21throw_enum_class_exprvE4Test, ptr null)
+// OGCG: unreachable
diff --git a/clang/test/CIR/CodeGen/vbase.cpp b/clang/test/CIR/CodeGen/vbase.cpp
index 9e42323..8fcb2a4 100644
--- a/clang/test/CIR/CodeGen/vbase.cpp
+++ b/clang/test/CIR/CodeGen/vbase.cpp
@@ -57,7 +57,7 @@ void ppp() { B b; }
// OGCG: @_ZTV1B = linkonce_odr unnamed_addr constant { [3 x ptr] } { [3 x ptr] [ptr inttoptr (i64 12 to ptr), ptr null, ptr @_ZTI1B] }, comdat, align 8
-// CIR: cir.func {{.*}}@_Z1fv() {
+// CIR: cir.func {{.*}}@_Z1fv()
// CIR: %[[D:.+]] = cir.alloca !rec_Derived, !cir.ptr<!rec_Derived>, ["d", init]
// CIR: cir.call @_ZN7DerivedC1Ev(%[[D]]) nothrow : (!cir.ptr<!rec_Derived>) -> ()
// CIR: %[[VPTR_PTR:.+]] = cir.vtable.get_vptr %[[D]] : !cir.ptr<!rec_Derived> -> !cir.ptr<!cir.vptr>
@@ -78,7 +78,7 @@ void ppp() { B b; }
// CIR: cir.call %[[FN]](%[[BASE_THIS]]) : (!cir.ptr<!cir.func<(!cir.ptr<!rec_Base>)>>, !cir.ptr<!rec_Base>) -> ()
// CIR: cir.return
-// CIR: cir.func {{.*}}@_Z1gv() {
+// CIR: cir.func {{.*}}@_Z1gv()
// CIR: %[[DF:.+]] = cir.alloca !rec_DerivedFinal, !cir.ptr<!rec_DerivedFinal>, ["df", init]
// CIR: cir.call @_ZN12DerivedFinalC1Ev(%[[DF]]) nothrow : (!cir.ptr<!rec_DerivedFinal>) -> ()
// CIR: %[[BASE_THIS_2:.+]] = cir.base_class_addr %[[DF]] : !cir.ptr<!rec_DerivedFinal> nonnull [0] -> !cir.ptr<!rec_Base>
@@ -89,7 +89,7 @@ void ppp() { B b; }
// CIR: cir.call %[[FN_2]](%[[BASE_THIS_2]]) : (!cir.ptr<!cir.func<(!cir.ptr<!rec_Base>)>>, !cir.ptr<!rec_Base>) -> ()
// CIR: cir.return
-// LLVM: define {{.*}}void @_Z1fv()
+// LLVM: define {{.*}}void @_Z1fv(){{.*}}
// LLVM: %[[D:.+]] = alloca {{.*}}
// LLVM: call void @_ZN7DerivedC1Ev(ptr %[[D]])
// LLVM: %[[VPTR_ADDR:.+]] = load ptr, ptr %[[D]]
@@ -102,7 +102,7 @@ void ppp() { B b; }
// LLVM: call void %[[VFN]](ptr %[[ADJ_THIS]])
// LLVM: ret void
-// LLVM: define {{.*}}void @_Z1gv()
+// LLVM: define {{.*}}void @_Z1gv(){{.*}}
// LLVM: %[[DF:.+]] = alloca {{.*}}
// LLVM: call void @_ZN12DerivedFinalC1Ev(ptr %[[DF]])
// LLVM: %[[VPTR2:.+]] = load ptr, ptr %[[DF]]
@@ -138,7 +138,7 @@ void ppp() { B b; }
// CIR: cir.store align(8) %[[VTABLE]], %[[B_VPTR]] : !cir.vptr, !cir.ptr<!cir.vptr>
// CIR: cir.return
-// LLVM: define{{.*}} void @_ZN1BC1Ev(ptr %[[THIS_ARG:.*]]) {
+// LLVM: define{{.*}} void @_ZN1BC1Ev(ptr %[[THIS_ARG:.*]]){{.*}} {
// LLVM: %[[THIS_ADDR:.*]] = alloca ptr
// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
diff --git a/clang/test/CIR/CodeGen/vla.c b/clang/test/CIR/CodeGen/vla.c
index e2adf45..b22c704 100644
--- a/clang/test/CIR/CodeGen/vla.c
+++ b/clang/test/CIR/CodeGen/vla.c
@@ -21,7 +21,7 @@ void f0(int len) {
// CIR: %[[STACK_RESTORE_PTR:.*]] = cir.load{{.*}} %[[SAVED_STACK]]
// CIR: cir.stackrestore %[[STACK_RESTORE_PTR]]
-// LLVM: define{{.*}} void @f0(i32 %[[LEN_ARG:.*]]) {
+// LLVM: define{{.*}} void @f0(i32 %[[LEN_ARG:.*]])
// LLVM: %[[LEN_ADDR:.*]] = alloca i32
// LLVM: %[[SAVED_STACK:.*]] = alloca ptr
// LLVM: store i32 %[[LEN_ARG]], ptr %[[LEN_ADDR]]
@@ -68,7 +68,7 @@ void f1(int len) {
// CIR: %[[STACK_RESTORE_PTR:.*]] = cir.load{{.*}} %[[SAVED_STACK]]
// CIR: cir.stackrestore %[[STACK_RESTORE_PTR]]
-// LLVM: define{{.*}} void @f1(i32 %[[LEN_ARG:.*]]) {
+// LLVM: define{{.*}} void @f1(i32 %[[LEN_ARG:.*]])
// LLVM: %[[LEN_ADDR:.*]] = alloca i32
// LLVM: %[[SAVED_STACK:.*]] = alloca ptr
// LLVM: store i32 %[[LEN_ARG]], ptr %[[LEN_ADDR]]
@@ -116,7 +116,7 @@ void f2(int len) {
// CIR: %[[STACK_RESTORE_PTR:.*]] = cir.load{{.*}} %[[SAVED_STACK]]
// CIR: cir.stackrestore %[[STACK_RESTORE_PTR]]
-// LLVM: define{{.*}} void @f2(i32 %[[LEN_ARG:.*]]) {
+// LLVM: define{{.*}} void @f2(i32 %[[LEN_ARG:.*]])
// LLVM: %[[LEN_ADDR:.*]] = alloca i32
// LLVM: %[[SAVED_STACK:.*]] = alloca ptr
// LLVM: store i32 %[[LEN_ARG]], ptr %[[LEN_ADDR]]
@@ -191,7 +191,7 @@ void f3(unsigned len) {
// CIR: %[[STACK_RESTORE_PTR:.*]] = cir.load{{.*}} %[[SAVED_STACK]]
// CIR: cir.stackrestore %[[STACK_RESTORE_PTR]]
-// LLVM: define{{.*}} void @f3(i32 %[[LEN_ARG:.*]]) {
+// LLVM: define{{.*}} void @f3(i32 %[[LEN_ARG:.*]])
// LLVM: %[[SAVED_STACK2:.*]] = alloca ptr
// LLVM: %[[LEN_ADDR:.*]] = alloca i32
// LLVM: %[[SAVED_STACK:.*]] = alloca ptr
diff --git a/clang/test/CIR/CodeGen/vtt.cpp b/clang/test/CIR/CodeGen/vtt.cpp
index dc30c32..f9a62e3 100644
--- a/clang/test/CIR/CodeGen/vtt.cpp
+++ b/clang/test/CIR/CodeGen/vtt.cpp
@@ -445,7 +445,7 @@ D::D() {}
// CIR-COMMON: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[C_ADDR]] : !cir.ptr<!rec_C> -> !cir.ptr<!cir.vptr>
// CIR-COMMON: cir.store{{.*}} %[[C_VPTR]], %[[C_VPTR_ADDR]] : !cir.vptr, !cir.ptr<!cir.vptr>
-// LLVM-COMMON: define {{.*}} void @_ZN1DC2Ev(ptr %[[THIS_ARG:.*]], ptr %[[VTT_ARG:.*]]) {
+// LLVM-COMMON: define {{.*}} void @_ZN1DC2Ev(ptr %[[THIS_ARG:.*]], ptr %[[VTT_ARG:.*]]){{.*}} {
// LLVM-COMMON: %[[THIS_ADDR:.*]] = alloca ptr
// LLVM-COMMON: %[[VTT_ADDR:.*]] = alloca ptr
// LLVM-COMMON: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
@@ -484,7 +484,7 @@ D::D() {}
// CIR-COMMON: %[[VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr<!rec_A> -> !cir.ptr<!cir.vptr>
// CIR-COMMON: cir.store{{.*}} %[[VPTR]], %[[VPTR_ADDR]] : !cir.vptr, !cir.ptr<!cir.vptr>
-// LLVM-COMMON: define {{.*}} void @_ZN1AC2Ev(ptr %[[THIS_ARG:.*]]) {
+// LLVM-COMMON: define {{.*}} void @_ZN1AC2Ev(ptr %[[THIS_ARG:.*]]){{.*}} {
// LLVM-COMMON: %[[THIS_ADDR:.*]] = alloca ptr, i64 1, align 8
// LLVM-COMMON: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]], align 8
// LLVM-COMMON: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]], align 8
diff --git a/clang/test/CIR/CodeGenOpenACC/cache.c b/clang/test/CIR/CodeGenOpenACC/cache.c
index 76651c1..d82230a 100644
--- a/clang/test/CIR/CodeGenOpenACC/cache.c
+++ b/clang/test/CIR/CodeGenOpenACC/cache.c
@@ -1,7 +1,7 @@
// RUN: %clang_cc1 -fopenacc -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir %s -o - | FileCheck %s
void acc_cache() {
- // CHECK: cir.func{{.*}} @acc_cache() {
+ // CHECK: cir.func{{.*}} @acc_cache()
int iArr[10];
// CHECK-NEXT: %[[IARR:.*]] = cir.alloca !cir.array<!s32i x 10>, !cir.ptr<!cir.array<!s32i x 10>>, ["iArr"]
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-copy.c b/clang/test/CIR/CodeGenOpenACC/combined-copy.c
index 9afbab5..31956b3 100644
--- a/clang/test/CIR/CodeGenOpenACC/combined-copy.c
+++ b/clang/test/CIR/CodeGenOpenACC/combined-copy.c
@@ -2,7 +2,7 @@
int global;
void acc_compute(int parmVar) {
- // CHECK: cir.func{{.*}} @acc_compute(%[[ARG:.*]]: !s32i{{.*}}) {
+ // CHECK: cir.func{{.*}} @acc_compute(%[[ARG:.*]]: !s32i{{.*}})
// CHECK-NEXT: %[[PARM:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["parmVar", init]
int localVar1;
short localVar2;
@@ -1082,7 +1082,7 @@ typedef struct OuterTy {
} Outer;
void copy_member_of_array_element_member() {
- // CHECK: cir.func{{.*}} @copy_member_of_array_element_member() {
+ // CHECK: cir.func{{.*}} @copy_member_of_array_element_member()
Outer outer;
// CHECK-NEXT: %[[OUTER:.*]] = cir.alloca !rec_OuterTy, !cir.ptr<!rec_OuterTy>, ["outer"]
@@ -1104,7 +1104,7 @@ void copy_member_of_array_element_member() {
}
void modifier_list() {
- // CHECK: cir.func{{.*}} @modifier_list() {
+ // CHECK: cir.func{{.*}} @modifier_list()
int localVar;
// CHECK-NEXT: %[[LOCALVAR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["localVar"]
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-firstprivate-clause.cpp b/clang/test/CIR/CodeGenOpenACC/combined-firstprivate-clause.cpp
index aa1103d..94f3f1a 100644
--- a/clang/test/CIR/CodeGenOpenACC/combined-firstprivate-clause.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/combined-firstprivate-clause.cpp
@@ -324,7 +324,7 @@ struct HasDtor {
//
extern "C" void acc_combined() {
- // CHECK: cir.func{{.*}} @acc_combined() {
+ // CHECK: cir.func{{.*}} @acc_combined()
int someInt;
// CHECK-NEXT: %[[SOMEINT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["someInt"]
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-private-clause.cpp b/clang/test/CIR/CodeGenOpenACC/combined-private-clause.cpp
index 7a7338c..ee82757 100644
--- a/clang/test/CIR/CodeGenOpenACC/combined-private-clause.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/combined-private-clause.cpp
@@ -158,7 +158,7 @@ struct HasDtor {
// CHECK-NEXT: }
extern "C" void acc_combined() {
- // CHECK: cir.func{{.*}} @acc_combined() {
+ // CHECK: cir.func{{.*}} @acc_combined()
int someInt;
// CHECK-NEXT: %[[SOMEINT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["someInt"]
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-copy.c b/clang/test/CIR/CodeGenOpenACC/compute-copy.c
index d7676d6..41e594e 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-copy.c
+++ b/clang/test/CIR/CodeGenOpenACC/compute-copy.c
@@ -2,7 +2,7 @@
int global;
void acc_compute(int parmVar) {
- // CHECK: cir.func{{.*}} @acc_compute(%[[ARG:.*]]: !s32i{{.*}}) {
+ // CHECK: cir.func{{.*}} @acc_compute(%[[ARG:.*]]: !s32i{{.*}})
// CHECK-NEXT: %[[PARM:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["parmVar", init]
int localVar1;
short localVar2;
@@ -899,7 +899,7 @@ void acc_compute_members() {
}
void modifier_list() {
- // CHECK: cir.func{{.*}} @modifier_list() {
+ // CHECK: cir.func{{.*}} @modifier_list()
int localVar;
// CHECK-NEXT: %[[LOCALVAR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["localVar"]
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.c b/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.c
index 184f904..52342e7 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.c
+++ b/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.c
@@ -142,7 +142,7 @@ struct NoCopyConstruct {};
// CHECK-NEXT: }
void acc_compute() {
- // CHECK: cir.func{{.*}} @acc_compute() {
+ // CHECK: cir.func{{.*}} @acc_compute()
int someInt;
// CHECK-NEXT: %[[SOMEINT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["someInt"]
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.cpp b/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.cpp
index e3f091a..a2c6c38 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.cpp
@@ -324,7 +324,7 @@ struct HasDtor {
//
extern "C" void acc_compute() {
- // CHECK: cir.func{{.*}} @acc_compute() {
+ // CHECK: cir.func{{.*}} @acc_compute()
int someInt;
// CHECK-NEXT: %[[SOMEINT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["someInt"]
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-private-clause.c b/clang/test/CIR/CodeGenOpenACC/compute-private-clause.c
index 34b8b69..943539e 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-private-clause.c
+++ b/clang/test/CIR/CodeGenOpenACC/compute-private-clause.c
@@ -45,7 +45,7 @@ struct NoCopyConstruct {};
// CHECK-NEXT: }
void acc_compute() {
- // CHECK: cir.func{{.*}} @acc_compute() {
+ // CHECK: cir.func{{.*}} @acc_compute()
int someInt;
// CHECK-NEXT: %[[SOMEINT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["someInt"]
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-private-clause.cpp b/clang/test/CIR/CodeGenOpenACC/compute-private-clause.cpp
index 8262a31..f0bd98c 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-private-clause.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/compute-private-clause.cpp
@@ -146,7 +146,7 @@ struct HasDtor {
// CHECK-NEXT: }
extern "C" void acc_compute() {
- // CHECK: cir.func{{.*}} @acc_compute() {
+ // CHECK: cir.func{{.*}} @acc_compute()
int someInt;
// CHECK-NEXT: %[[SOMEINT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["someInt"]
diff --git a/clang/test/CIR/CodeGenOpenACC/loop-private-clause.cpp b/clang/test/CIR/CodeGenOpenACC/loop-private-clause.cpp
index 7bbc58109..423b42b 100644
--- a/clang/test/CIR/CodeGenOpenACC/loop-private-clause.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/loop-private-clause.cpp
@@ -158,7 +158,7 @@ struct HasDtor {
// CHECK-NEXT: }
extern "C" void acc_loop() {
- // CHECK: cir.func{{.*}} @acc_loop() {
+ // CHECK: cir.func{{.*}} @acc_loop()
int someInt;
// CHECK-NEXT: %[[SOMEINT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["someInt"]
diff --git a/clang/test/CIR/IR/inline-attrs.cir b/clang/test/CIR/IR/inline-attrs.cir
new file mode 100644
index 0000000..f525abe
--- /dev/null
+++ b/clang/test/CIR/IR/inline-attrs.cir
@@ -0,0 +1,33 @@
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
+
+!s32i = !cir.int<s, 32>
+
+module {
+ cir.func @noinline_func(%arg0: !s32i) -> !s32i inline(never) {
+ cir.return %arg0 : !s32i
+ }
+ cir.func @always_inline_func(%arg0: !s32i) -> !s32i inline(always) {
+ cir.return %arg0 : !s32i
+ }
+ cir.func @inline_hint_func(%arg0: !s32i) -> !s32i inline(hint) {
+ cir.return %arg0 : !s32i
+ }
+ cir.func @regular_func(%arg0: !s32i) -> !s32i {
+ cir.return %arg0 : !s32i
+ }
+ cir.func dso_local @noinline_with_attrs(%arg0: !s32i) -> !s32i inline(never) {
+ cir.return %arg0 : !s32i
+ }
+ cir.func private @noinline_decl(!s32i) -> !s32i inline(never)
+ cir.func private @always_inline_decl(!s32i) -> !s32i inline(always)
+ cir.func private @inline_hint_decl(!s32i) -> !s32i inline(hint)
+}
+
+// CHECK: cir.func @noinline_func(%arg0: !s32i) -> !s32i inline(never)
+// CHECK: cir.func @always_inline_func(%arg0: !s32i) -> !s32i inline(always)
+// CHECK: cir.func @inline_hint_func(%arg0: !s32i) -> !s32i inline(hint)
+// CHECK: cir.func @regular_func(%arg0: !s32i) -> !s32i {
+// CHECK: cir.func dso_local @noinline_with_attrs(%arg0: !s32i) -> !s32i inline(never)
+// CHECK: cir.func private @noinline_decl(!s32i) -> !s32i inline(never)
+// CHECK: cir.func private @always_inline_decl(!s32i) -> !s32i inline(always)
+// CHECK: cir.func private @inline_hint_decl(!s32i) -> !s32i inline(hint)
diff --git a/clang/test/CIR/IR/invalid-try-catch.cir b/clang/test/CIR/IR/invalid-try-catch.cir
index 04a4d25..94df4b6 100644
--- a/clang/test/CIR/IR/invalid-try-catch.cir
+++ b/clang/test/CIR/IR/invalid-try-catch.cir
@@ -40,10 +40,11 @@ module {
cir.func dso_local @invalid_catch_empty_block() {
cir.scope {
- // expected-error @below {{'cir.try' op region #1 ('handler_regions') failed to verify constraint: region with at least 1 blocks}}
cir.try {
cir.yield
- } catch all {
+ }
+ // expected-error @below {{'cir.try' handler region shall not be empty}}
+ catch all {
}
}
cir.return
diff --git a/clang/test/CIR/Lowering/basic.cpp b/clang/test/CIR/Lowering/basic.cpp
index 5642cef..63beb0a 100644
--- a/clang/test/CIR/Lowering/basic.cpp
+++ b/clang/test/CIR/Lowering/basic.cpp
@@ -5,7 +5,7 @@ int f1() {
return i;
}
-// CHECK: define{{.*}} i32 @_Z2f1v() {
+// CHECK: define{{.*}} i32 @_Z2f1v(){{.*}} {
// CHECK: %[[RV:.*]] = alloca i32, i64 1, align 4
// CHECK: %[[I_PTR:.*]] = alloca i32, i64 1, align 4
// CHECK: %[[I:.*]] = load i32, ptr %[[I_PTR]], align 4
@@ -18,7 +18,7 @@ int f2() {
return i;
}
-// CHECK: define{{.*}} i32 @_Z2f2v() {
+// CHECK: define{{.*}} i32 @_Z2f2v(){{.*}} {
// CHECK: %[[RV:.*]] = alloca i32, i64 1, align 4
// CHECK: %[[I_PTR:.*]] = alloca i32, i64 1, align 4
// CHECK: store i32 2, ptr %[[I_PTR]], align 4
diff --git a/clang/test/CIR/Lowering/func-simple.cpp b/clang/test/CIR/Lowering/func-simple.cpp
index 96306bab..df5b007 100644
--- a/clang/test/CIR/Lowering/func-simple.cpp
+++ b/clang/test/CIR/Lowering/func-simple.cpp
@@ -23,7 +23,7 @@ int scopes() {
}
}
}
-// CHECK: define{{.*}} i32 @_Z6scopesv() {
+// CHECK: define{{.*}} i32 @_Z6scopesv(){{.*}} {
// CHECK: %[[RV:.*]] = alloca i32, i64 1, align 4
// CHECK: br label %[[LABEL1:.*]]
// CHECK: [[LABEL1]]:
@@ -40,7 +40,7 @@ int scopes() {
// CHECK: }
long longfunc() { return 42l; }
-// CHECK: define{{.*}} i64 @_Z8longfuncv() {
+// CHECK: define{{.*}} i64 @_Z8longfuncv(){{.*}} {
// CHECK: %[[RV:.*]] = alloca i64, i64 1, align 8
// CHECK: store i64 42, ptr %[[RV]], align 8
// CHECK: %[[R:.*]] = load i64, ptr %[[RV]], align 8
@@ -48,7 +48,7 @@ long longfunc() { return 42l; }
// CHECK: }
unsigned unsignedfunc() { return 42u; }
-// CHECK: define{{.*}} i32 @_Z12unsignedfuncv() {
+// CHECK: define{{.*}} i32 @_Z12unsignedfuncv(){{.*}} {
// CHECK: %[[RV:.*]] = alloca i32, i64 1, align 4
// CHECK: store i32 42, ptr %[[RV]], align 4
// CHECK: %[[R:.*]] = load i32, ptr %[[RV]], align 4
@@ -56,7 +56,7 @@ unsigned unsignedfunc() { return 42u; }
// CHECK: }
unsigned long long ullfunc() { return 42ull; }
-// CHECK: define{{.*}} i64 @_Z7ullfuncv() {
+// CHECK: define{{.*}} i64 @_Z7ullfuncv(){{.*}} {
// CHECK: %[[RV:.*]] = alloca i64, i64 1, align 8
// CHECK: store i64 42, ptr %[[RV]], align 8
// CHECK: %[[R:.*]] = load i64, ptr %[[RV]], align 8
@@ -64,7 +64,7 @@ unsigned long long ullfunc() { return 42ull; }
// CHECK: }
bool boolfunc() { return true; }
-// CHECK: define{{.*}} i1 @_Z8boolfuncv() {
+// CHECK: define{{.*}} i1 @_Z8boolfuncv(){{.*}} {
// CHECK: %[[RV:.*]] = alloca i8, i64 1, align 1
// CHECK: store i8 1, ptr %[[RV]], align 1
// CHECK: %[[R8:.*]] = load i8, ptr %[[RV]], align 1
diff --git a/clang/test/CIR/func-simple.cpp b/clang/test/CIR/func-simple.cpp
index c9cb5c5..8894757 100644
--- a/clang/test/CIR/func-simple.cpp
+++ b/clang/test/CIR/func-simple.cpp
@@ -2,17 +2,17 @@
// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s
void empty() { }
-// CHECK: cir.func{{.*}} @_Z5emptyv() {
+// CHECK: cir.func{{.*}} @_Z5emptyv()
// CHECK: cir.return
// CHECK: }
void voidret() { return; }
-// CHECK: cir.func{{.*}} @_Z7voidretv() {
+// CHECK: cir.func{{.*}} @_Z7voidretv()
// CHECK: cir.return
// CHECK: }
int intfunc() { return 42; }
-// CHECK: cir.func{{.*}} @_Z7intfuncv() -> !s32i {
+// CHECK: cir.func{{.*}} @_Z7intfuncv() -> !s32i
// CHECK: %0 = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"] {alignment = 4 : i64}
// CHECK: %1 = cir.const #cir.int<42> : !s32i
// CHECK: cir.store %1, %0 : !s32i, !cir.ptr<!s32i>
@@ -27,7 +27,7 @@ int scopes() {
}
}
}
-// CHECK: cir.func{{.*}} @_Z6scopesv() -> !s32i {
+// CHECK: cir.func{{.*}} @_Z6scopesv() -> !s32i
// CHECK: %0 = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"] {alignment = 4 : i64}
// CHECK: cir.scope {
// CHECK: cir.scope {
@@ -68,7 +68,7 @@ unsigned long long ullfunc() { return 42ull; }
// CHECK: }
bool boolfunc() { return true; }
-// CHECK: cir.func{{.*}} @_Z8boolfuncv() -> !cir.bool {
+// CHECK: cir.func{{.*}} @_Z8boolfuncv() -> !cir.bool
// CHECK: %0 = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["__retval"] {alignment = 1 : i64}
// CHECK: %1 = cir.const #true
// CHECK: cir.store %1, %0 : !cir.bool, !cir.ptr<!cir.bool>
@@ -77,7 +77,7 @@ bool boolfunc() { return true; }
// CHECK: }
float floatfunc() { return 42.42f; }
-// CHECK: cir.func{{.*}} @_Z9floatfuncv() -> !cir.float {
+// CHECK: cir.func{{.*}} @_Z9floatfuncv() -> !cir.float
// CHECK: %0 = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["__retval"] {alignment = 4 : i64}
// CHECK: %1 = cir.const #cir.fp<4.242
// CHECK: cir.store %1, %0 : !cir.float, !cir.ptr<!cir.float>
@@ -86,7 +86,7 @@ float floatfunc() { return 42.42f; }
// CHECK: }
double doublefunc() { return 42.42; }
-// CHECK: cir.func{{.*}} @_Z10doublefuncv() -> !cir.double {
+// CHECK: cir.func{{.*}} @_Z10doublefuncv() -> !cir.double
// CHECK: %0 = cir.alloca !cir.double, !cir.ptr<!cir.double>, ["__retval"] {alignment = 8 : i64}
// CHECK: %1 = cir.const #cir.fp<4.242
// CHECK: cir.store %1, %0 : !cir.double, !cir.ptr<!cir.double>
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfadd.c
new file mode 100644
index 0000000..d7734e0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfadd.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfclass.c
new file mode 100644
index 0000000..68814f4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfclass.c
@@ -0,0 +1,134 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1bf16.i64(<vscale x 1 x i16> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf4_u16mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2bf16.i64(<vscale x 2 x i16> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf2_u16mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4bf16.i64(<vscale x 4 x i16> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m1_u16m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8bf16.i64(<vscale x 8 x i16> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m2_u16m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16bf16.i64(<vscale x 16 x i16> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m4_u16m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32bf16.i64(<vscale x 32 x i16> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m8_u16m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16mf4_u16mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16mf2_u16mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m1_u16m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m2_u16m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m4_u16m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m8_u16m8_m(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmacc.c
new file mode 100644
index 0000000..616455d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmacc.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmadd.c
new file mode 100644
index 0000000..eec662a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmadd.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmax.c
new file mode 100644
index 0000000..dfdeb4e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmax.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmerge.c
new file mode 100644
index 0000000..96221c5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmerge.c
@@ -0,0 +1,69 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmerge_vfm_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmerge.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmerge_vfm_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, vbool64_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16mf4(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmerge_vfm_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmerge.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmerge_vfm_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, vbool32_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16mf2(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmerge_vfm_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmerge.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmerge_vfm_bf16m1(vbfloat16m1_t op1, __bf16 op2, vbool16_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m1(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmerge_vfm_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmerge.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmerge_vfm_bf16m2(vbfloat16m2_t op1, __bf16 op2, vbool8_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m2(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmerge_vfm_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmerge.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmerge_vfm_bf16m4(vbfloat16m4_t op1, __bf16 op2, vbool4_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m4(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmerge_vfm_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmerge.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmerge_vfm_bf16m8(vbfloat16m8_t op1, __bf16 op2, vbool2_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m8(op1, op2, mask, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmin.c
new file mode 100644
index 0000000..8f8d82ba
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmin.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsac.c
new file mode 100644
index 0000000..f4644df
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsac.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
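+// Masked variants follow: the leading vboolN_t argument becomes the mask
+// operand of the corresponding @llvm.riscv.vfmsac.mask intrinsic, and the
+// trailing i64 3 selects the tail-agnostic/mask-agnostic policy.
+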
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsub.c
new file mode 100644
index 0000000..07053afa
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
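+// Each pair of tests below exercises the vector-vector (_vv_) and
+// vector-scalar (_vf_, taking a __bf16 scalar) forms of vfmsub for every
+// bf16 LMUL from mf4 to m8, followed by the masked (_m) variants. In the
+// expected IR, the trailing i64 7 operand selects the dynamic rounding mode
+// and i64 3 the tail-agnostic/mask-agnostic policy.
+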
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmul.c
new file mode 100644
index 0000000..88fb329
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmul.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
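+// These tests cover vfmul on bf16 vectors: _vv_ (vector-vector) and _vf_
+// (vector times __bf16 scalar) for each LMUL, plus the masked _m variants.
+// The unmasked forms lower to the plain vfmul intrinsic with a poison
+// passthru and a dynamic rounding-mode operand (i64 7); the masked forms add
+// the mask operand and a tail/mask-agnostic policy operand (i64 3).
+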
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmv.c
new file mode 100644
index 0000000..d80ec3d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmv.c
@@ -0,0 +1,189 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
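+// These tests cover the bf16 move intrinsics: vfmv_v_f splats a __bf16
+// scalar across a vector, vfmv_s_f writes the scalar into element 0 of the
+// result, and vfmv_f_s reads element 0 back out as a __bf16 scalar.
+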
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmv_v_f_bf16mf4(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.v.f.nxv1bf16.i64(<vscale x 1 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmv_v_f_bf16mf4(__bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmv_v_f_bf16mf2(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.v.f.nxv2bf16.i64(<vscale x 2 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmv_v_f_bf16mf2(__bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmv_v_f_bf16m1(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.v.f.nxv4bf16.i64(<vscale x 4 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmv_v_f_bf16m1(__bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmv_v_f_bf16m2(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.v.f.nxv8bf16.i64(<vscale x 8 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmv_v_f_bf16m2(__bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmv_v_f_bf16m4(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.v.f.nxv16bf16.i64(<vscale x 16 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmv_v_f_bf16m4(__bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmv_v_f_bf16m8(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.v.f.nxv32bf16.i64(<vscale x 32 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmv_v_f_bf16m8(__bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16mf4_bf16(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv1bf16(<vscale x 1 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16mf4_bf16(vbfloat16mf4_t src) {
+ return __riscv_vfmv_f_s_bf16mf4_bf16(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmv_s_f_bf16mf4(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.s.f.nxv1bf16.i64(<vscale x 1 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmv_s_f_bf16mf4(__bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16mf2_bf16(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv2bf16(<vscale x 2 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16mf2_bf16(vbfloat16mf2_t src) {
+ return __riscv_vfmv_f_s_bf16mf2_bf16(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmv_s_f_bf16mf2(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.s.f.nxv2bf16.i64(<vscale x 2 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmv_s_f_bf16mf2(__bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m1_bf16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv4bf16(<vscale x 4 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m1_bf16(vbfloat16m1_t src) {
+ return __riscv_vfmv_f_s_bf16m1_bf16(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmv_s_f_bf16m1(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.s.f.nxv4bf16.i64(<vscale x 4 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmv_s_f_bf16m1(__bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m2_bf16(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv8bf16(<vscale x 8 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m2_bf16(vbfloat16m2_t src) {
+ return __riscv_vfmv_f_s_bf16m2_bf16(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmv_s_f_bf16m2(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.s.f.nxv8bf16.i64(<vscale x 8 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmv_s_f_bf16m2(__bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m4_bf16(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv16bf16(<vscale x 16 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m4_bf16(vbfloat16m4_t src) {
+ return __riscv_vfmv_f_s_bf16m4_bf16(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmv_s_f_bf16m4(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.s.f.nxv16bf16.i64(<vscale x 16 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmv_s_f_bf16m4(__bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m8_bf16(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv32bf16(<vscale x 32 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m8_bf16(vbfloat16m8_t src) {
+ return __riscv_vfmv_f_s_bf16m8_bf16(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmv_s_f_bf16m8(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.s.f.nxv32bf16.i64(<vscale x 32 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmv_s_f_bf16m8(__bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m8(src, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt.c
new file mode 100644
index 0000000..a5afab9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt.c
@@ -0,0 +1,724 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm(vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm(vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm(vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm(vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm(vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m(vbool32_t vm,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m(vbool32_t vm,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m(vbool16_t vm,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rod.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rod.c
new file mode 100644
index 0000000..70c377b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rod.c
@@ -0,0 +1,113 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m4_m(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rtz.c
new file mode 100644
index 0000000..854e986
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rtz.c
@@ -0,0 +1,267 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8(vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4(vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m(vbool32_t vm,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m(vbool32_t vm,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m(vbool16_t vm,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmacc.c
new file mode 100644
index 0000000..1848488
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmacc.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmadd.c
new file mode 100644
index 0000000..e519e5a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmadd.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsac.c
new file mode 100644
index 0000000..47e1f44
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsac.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsub.c
new file mode 100644
index 0000000..4b55b64
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrec7.c
new file mode 100644
index 0000000..1ffee73
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrec7.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4(vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2(vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1(vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m1(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2(vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4(vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8(vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m8(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf4_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf2_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m1_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m2_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m4_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m8_m(mask, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsqrt7.c
new file mode 100644
index 0000000..964c486
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsqrt7.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4(vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2(vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1(vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m1(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2(vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4(vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8(vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m8(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf4_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf2_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m1_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m2_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m4_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m8_m(mask, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsub.c
new file mode 100644
index 0000000..c7c3869e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsub.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnj.c
new file mode 100644
index 0000000..778b8b83
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnj.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjn.c
new file mode 100644
index 0000000..7de3089
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjn.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjx.c
new file mode 100644
index 0000000..5fa285cc
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjx.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1down.c
new file mode 100644
index 0000000..b94d26b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1down.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4(vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf4(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2(vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf2(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1(vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m1(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2(vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m2(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4(vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m4(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8(vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m8(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf4_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf2_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m1_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m2_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m4_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m8_m(mask, src, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1up.c
new file mode 100644
index 0000000..06e8b49
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1up.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4(vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf4(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2(vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf2(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1(vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2(vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4(vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8(vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf4_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf2_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8_m(mask, src, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsub.c
new file mode 100644
index 0000000..2423b0b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwadd.c
new file mode 100644
index 0000000..24d34f4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwadd.c
@@ -0,0 +1,899 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1(vfloat32m1_t vs2, vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm(vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwcvt.c
new file mode 100644
index 0000000..fb3e003
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwcvt.c
@@ -0,0 +1,366 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4(vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2(vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1(vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2(vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4(vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8(vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4(vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2(vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1(vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2(vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4(vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8(vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m1_f32m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m2_f32m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m4_f32m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_m(vbool64_t vm, vint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_m(vbool32_t vm, vint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_m(vbool16_t vm, vint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_m(vbool8_t vm, vint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_m(vbool4_t vm, vint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_m(vbool2_t vm, vint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_m(vbool64_t vm, vuint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_m(vbool32_t vm, vuint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_m(vbool16_t vm, vuint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_m(vbool8_t vm, vuint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_m(vbool4_t vm, vuint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_m(vbool2_t vm, vuint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_m(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmacc.c
new file mode 100644
index 0000000..be09003
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmacc.c
@@ -0,0 +1,486 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmsac.c
new file mode 100644
index 0000000..7490813
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmsac.c
@@ -0,0 +1,486 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmul.c
new file mode 100644
index 0000000..6783ba4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmul.c
@@ -0,0 +1,455 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmacc.c
new file mode 100644
index 0000000..6127a94
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmacc.c
@@ -0,0 +1,494 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmsac.c
new file mode 100644
index 0000000..f37dd31
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmsac.c
@@ -0,0 +1,494 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
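+// Note: each function below is a thin wrapper around a single
+// __riscv_vfwnmsac_* call so the autogenerated CHECK-RV64 lines can verify
+// lowering to the matching llvm.riscv.vfwnmsac[.mask] intrinsic. The suffixes
+// cover the unmasked, masked (_m), explicit rounding-mode (_rm), and masked
+// rounding-mode (_rm_m) variants from bf16mf4/f32mf2 up to bf16m4/f32m8; the
+// plain variants carry an i64 7 (dynamic) rounding-mode operand, while the
+// _rm tests pass __RISCV_FRM_RNE, which lowers to i64 0.
+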
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwsub.c
new file mode 100644
index 0000000..510ff91
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwsub.c
@@ -0,0 +1,899 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
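+// Note: in addition to the vv/vf forms that widen two bf16 operands, the
+// vfwsub tests exercise the wv/wf forms, which take an already-widened f32
+// first source and lower to llvm.riscv.vfwsub.w.*; all of these non-policy
+// variants pass a poison passthru operand in the generated IR.
+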
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1(vfloat32m1_t vs2, vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm(vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfeq.c
new file mode 100644
index 0000000..669d042
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfeq.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfge.c
new file mode 100644
index 0000000..b169efd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfge.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfgt.c
new file mode 100644
index 0000000..9aea7d2
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfgt.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfle.c
new file mode 100644
index 0000000..40f0c27
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfle.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmflt.c
new file mode 100644
index 0000000..f64eee3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmflt.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
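As an illustrative sketch (not part of this patch), the unmasked and masked bf16 compares above can be chained: the first compare produces the mask that gates the second. The helper name and the "band" check below are hypothetical; the intrinsic names and types are exactly the ones exercised by the vmflt tests, and, as the CHECK lines show, the non-policy _m form passes a poison pass-through, so inactive lanes of the result are unspecified.

  #include <riscv_vector.h>

  // Hypothetical helper: among lanes where a < b, additionally test a < threshold.
  // Lanes where a >= b are inactive; their result bits are unspecified
  // (poison pass-through in the non-policy _m intrinsic above).
  static vbool16_t lt_and_below(vbfloat16m1_t a, vbfloat16m1_t b,
                                __bf16 threshold, size_t vl) {
    vbool16_t a_lt_b = __riscv_vmflt_vv_bf16m1_b16(a, b, vl);
    return __riscv_vmflt_vf_bf16m1_b16_m(a_lt_b, a, threshold, vl);
  }
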
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfne.c
new file mode 100644
index 0000000..809ea56
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfne.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
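A similarly small sketch, again outside the patch: because NaN compares unequal to itself, the unmasked vmfne intrinsic tested above can flag NaN lanes in a bf16 vector. The helper name is made up; the intrinsic and the types are those from the test file, built with the same flags as the RUN lines.

  #include <riscv_vector.h>

  // Hypothetical helper: set mask bits for lanes of v that hold NaN.
  // vmfne is the negation of the ordered-equal compare, so v != v is
  // true exactly on NaN lanes.
  static vbool16_t isnan_lanes_bf16m1(vbfloat16m1_t v, size_t vl) {
    return __riscv_vmfne_vv_bf16m1_b16(v, v, vl);
  }
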
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfadd.c
new file mode 100644
index 0000000..9d6b071c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfadd.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
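For the overloaded spellings, one hedged usage sketch (helper name hypothetical, same -march configuration as the RUN lines assumed): the single __riscv_vfadd name resolves to the vector-vector, vector-scalar, and masked forms exercised above, so mixed expressions stay terse.

  #include <riscv_vector.h>

  // Hypothetical helper: compute a + b, then add the scalar c under a mask.
  // With the non-policy masked overload the pass-through is poison (see the
  // CHECK lines above), so inactive lanes of the final result are unspecified.
  static vbfloat16m1_t add_then_offset(vbool16_t mask, vbfloat16m1_t a,
                                       vbfloat16m1_t b, __bf16 c, size_t vl) {
    vbfloat16m1_t sum = __riscv_vfadd(a, b, vl);  // vector-vector overload
    return __riscv_vfadd(mask, sum, c, vl);       // masked vector-scalar overload
  }
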
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfclass.c
new file mode 100644
index 0000000..2760f85
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfclass.c
@@ -0,0 +1,134 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1bf16.i64(<vscale x 1 x i16> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2bf16.i64(<vscale x 2 x i16> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4bf16.i64(<vscale x 4 x i16> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8bf16.i64(<vscale x 8 x i16> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16bf16.i64(<vscale x 16 x i16> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32bf16.i64(<vscale x 32 x i16> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfclass(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfclass(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfclass(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfclass(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfclass(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfclass(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmacc.c
new file mode 100644
index 0000000..ae3f1f2
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmacc.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmadd.c
new file mode 100644
index 0000000..db2184c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmadd.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmax.c
new file mode 100644
index 0000000..66497bf
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmax.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmerge.c
new file mode 100644
index 0000000..1dc290b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmerge.c
@@ -0,0 +1,69 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmerge_vfm_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmerge.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmerge_vfm_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, vbool64_t mask, size_t vl) {
+ return __riscv_vfmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmerge_vfm_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmerge.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmerge_vfm_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, vbool32_t mask, size_t vl) {
+ return __riscv_vfmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmerge_vfm_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmerge.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmerge_vfm_bf16m1(vbfloat16m1_t op1, __bf16 op2, vbool16_t mask, size_t vl) {
+ return __riscv_vfmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmerge_vfm_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmerge.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmerge_vfm_bf16m2(vbfloat16m2_t op1, __bf16 op2, vbool8_t mask, size_t vl) {
+ return __riscv_vfmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmerge_vfm_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmerge.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmerge_vfm_bf16m4(vbfloat16m4_t op1, __bf16 op2, vbool4_t mask, size_t vl) {
+ return __riscv_vfmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmerge_vfm_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmerge.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmerge_vfm_bf16m8(vbfloat16m8_t op1, __bf16 op2, vbool2_t mask, size_t vl) {
+ return __riscv_vfmerge(op1, op2, mask, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmin.c
new file mode 100644
index 0000000..1564d11
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmin.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsac.c
new file mode 100644
index 0000000..0384e7d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsac.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsub.c
new file mode 100644
index 0000000..306f189
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmul.c
new file mode 100644
index 0000000..fffd83a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmul.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmv.c
new file mode 100644
index 0000000..f85378f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmv.c
@@ -0,0 +1,69 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16mf4_bf16(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[SRC:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv1bf16(<vscale x 1 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16mf4_bf16(vbfloat16mf4_t src) {
+ return __riscv_vfmv_f(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16mf2_bf16(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv2bf16(<vscale x 2 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16mf2_bf16(vbfloat16mf2_t src) {
+ return __riscv_vfmv_f(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m1_bf16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv4bf16(<vscale x 4 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m1_bf16(vbfloat16m1_t src) {
+ return __riscv_vfmv_f(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m2_bf16(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv8bf16(<vscale x 8 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m2_bf16(vbfloat16m2_t src) {
+ return __riscv_vfmv_f(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m4_bf16(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv16bf16(<vscale x 16 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m4_bf16(vbfloat16m4_t src) {
+ return __riscv_vfmv_f(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m8_bf16(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv32bf16(<vscale x 32 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m8_bf16(vbfloat16m8_t src) {
+ return __riscv_vfmv_f(src);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt.c
new file mode 100644
index 0000000..fb635d6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt.c
@@ -0,0 +1,724 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm(vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm(vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm(vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm(vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm(vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m(vbool32_t vm,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m(vbool32_t vm,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m(vbool16_t vm,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rod.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rod.c
new file mode 100644
index 0000000..1ad856d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rod.c
@@ -0,0 +1,113 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rtz.c
new file mode 100644
index 0000000..12d0893
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rtz.c
@@ -0,0 +1,267 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8(vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4(vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m(vbool32_t vm,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m(vbool32_t vm,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m(vbool16_t vm,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmacc.c
new file mode 100644
index 0000000..6f7928b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmacc.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmadd.c
new file mode 100644
index 0000000..97d2070
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmadd.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsac.c
new file mode 100644
index 0000000..404b4f8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsac.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsub.c
new file mode 100644
index 0000000..3a520dd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrec7.c
new file mode 100644
index 0000000..462b6ac
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrec7.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4(vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2(vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1(vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2(vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4(vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8(vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsqrt7.c
new file mode 100644
index 0000000..051fde7
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsqrt7.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4(vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2(vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1(vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2(vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4(vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8(vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsub.c
new file mode 100644
index 0000000..0494182
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsub.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnj.c
new file mode 100644
index 0000000..615dedd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnj.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjn.c
new file mode 100644
index 0000000..a895e5f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjn.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjx.c
new file mode 100644
index 0000000..0187516
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjx.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1down.c
new file mode 100644
index 0000000..4a76894
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1down.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4(vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2(vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1(vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2(vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4(vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8(vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(mask, src, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1up.c
new file mode 100644
index 0000000..f9f2dc0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1up.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4(vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2(vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1(vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2(vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4(vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8(vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(mask, src, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsub.c
new file mode 100644
index 0000000..ebcf6fa
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwadd.c
new file mode 100644
index 0000000..124e7fb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwadd.c
@@ -0,0 +1,893 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1(vfloat32m1_t vs2, vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm(vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwcvt.c
new file mode 100644
index 0000000..0399a63
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwcvt.c
@@ -0,0 +1,366 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4(vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2(vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1(vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2(vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4(vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8(vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4(vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2(vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1(vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2(vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4(vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8(vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_m(vbool64_t vm, vint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_m(vbool32_t vm, vint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_m(vbool16_t vm, vint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_m(vbool8_t vm, vint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_m(vbool4_t vm, vint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_m(vbool2_t vm, vint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_m(vbool64_t vm, vuint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_m(vbool32_t vm, vuint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_m(vbool16_t vm, vuint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_m(vbool8_t vm, vuint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_m(vbool4_t vm, vuint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_m(vbool2_t vm, vuint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmacc.c
new file mode 100644
index 0000000..2eb7fc8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmacc.c
@@ -0,0 +1,474 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmsac.c
new file mode 100644
index 0000000..28f5076
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmsac.c
@@ -0,0 +1,474 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmul.c
new file mode 100644
index 0000000..8de49fa
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmul.c
@@ -0,0 +1,451 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmacc.c
new file mode 100644
index 0000000..7836931
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmacc.c
@@ -0,0 +1,480 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmsac.c
new file mode 100644
index 0000000..ca936af
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmsac.c
@@ -0,0 +1,480 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwsub.c
new file mode 100644
index 0000000..2e22e22
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwsub.c
@@ -0,0 +1,893 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1(vfloat32m1_t vs2, vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm(vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfeq.c
new file mode 100644
index 0000000..29881c9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfeq.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfge.c
new file mode 100644
index 0000000..b8083c5e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfge.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfgt.c
new file mode 100644
index 0000000..b8749b3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfgt.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfle.c
new file mode 100644
index 0000000..724608c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfle.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmflt.c
new file mode 100644
index 0000000..1b0b898
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmflt.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfne.c
new file mode 100644
index 0000000..672c150
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfne.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfadd.c
new file mode 100644
index 0000000..6d55279
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfadd.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfclass.c
new file mode 100644
index 0000000..8e6946d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfclass.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tu(vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf4_u16mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tu(vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf2_u16mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_tu(vuint16m1_t vd, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m1_u16m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_tu(vuint16m2_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m2_u16m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_tu(vuint16m4_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m4_u16m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_tu(vuint16m8_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m8_u16m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf4_u16mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf2_u16mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m1_u16m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_tum(vbool8_t vm, vuint16m2_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m2_u16m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_tum(vbool4_t vm, vuint16m4_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m4_u16m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_tum(vbool2_t vm, vuint16m8_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m8_u16m8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf4_u16mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf2_u16mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m1_u16m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_tumu(vbool8_t vm, vuint16m2_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m2_u16m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_tumu(vbool4_t vm, vuint16m4_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m4_u16m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_tumu(vbool2_t vm, vuint16m8_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m8_u16m8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf4_u16mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf2_u16mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m1_u16m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_mu(vbool8_t vm, vuint16m2_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m2_u16m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_mu(vbool4_t vm, vuint16m4_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m4_u16m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_mu(vbool2_t vm, vuint16m8_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m8_u16m8_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmacc.c
new file mode 100644
index 0000000..2d4e481
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmacc.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmadd.c
new file mode 100644
index 0000000..511e073
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmadd.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmax.c
new file mode 100644
index 0000000..f3698d4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmax.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmerge.c
new file mode 100644
index 0000000..bcaf2cb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmerge.c
@@ -0,0 +1,69 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmerge_vfm_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmerge.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmerge_vfm_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, vbool64_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16mf4_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmerge_vfm_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmerge.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmerge_vfm_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, vbool32_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16mf2_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmerge_vfm_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmerge.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmerge_vfm_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, vbool16_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m1_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmerge_vfm_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmerge.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmerge_vfm_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, vbool8_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m2_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmerge_vfm_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmerge.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmerge_vfm_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, vbool4_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m4_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmerge_vfm_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmerge.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmerge_vfm_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, vbool2_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m8_tu(maskedoff, op1, op2, mask, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmin.c
new file mode 100644
index 0000000..911f879
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmin.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsac.c
new file mode 100644
index 0000000..9575ad3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsac.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsub.c
new file mode 100644
index 0000000..8e382f71
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsub.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmul.c
new file mode 100644
index 0000000..716f056
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmul.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmv.c
new file mode 100644
index 0000000..069ee6a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmv.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmv_v_f_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.v.f.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmv_v_f_bf16mf4_tu(vbfloat16mf4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16mf4_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmv_v_f_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.v.f.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmv_v_f_bf16mf2_tu(vbfloat16mf2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16mf2_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmv_v_f_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.v.f.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmv_v_f_bf16m1_tu(vbfloat16m1_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m1_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmv_v_f_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.v.f.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmv_v_f_bf16m2_tu(vbfloat16m2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m2_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmv_v_f_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.v.f.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmv_v_f_bf16m4_tu(vbfloat16m4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m4_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmv_v_f_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.v.f.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmv_v_f_bf16m8_tu(vbfloat16m8_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m8_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmv_s_f_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.s.f.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmv_s_f_bf16mf4_tu(vbfloat16mf4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16mf4_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmv_s_f_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.s.f.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmv_s_f_bf16mf2_tu(vbfloat16mf2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16mf2_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmv_s_f_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.s.f.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmv_s_f_bf16m1_tu(vbfloat16m1_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m1_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmv_s_f_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.s.f.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmv_s_f_bf16m2_tu(vbfloat16m2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m2_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmv_s_f_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.s.f.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmv_s_f_bf16m4_tu(vbfloat16m4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m4_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmv_s_f_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.s.f.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmv_s_f_bf16m8_tu(vbfloat16m8_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m8_tu(maskedoff, src, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt.c
new file mode 100644
index 0000000..36d4fc3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt.c
@@ -0,0 +1,1577 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tu(vint8mf8_t vd, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tu(vint8mf4_t vd, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tu(vint8mf2_t vd, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tu(vint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tu(vint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tu(vint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tu(vuint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tu(vuint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tu(vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tu(vuint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tu(vuint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tu(vuint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tu(vbfloat16m1_t vd, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tu(vbfloat16m2_t vd, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tu(vbfloat16m4_t vd, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tum(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tum(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tum(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tum(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tum(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tum(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tum(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tum(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tum(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tumu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tumu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tumu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tumu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tumu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tumu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_mu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_mu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_mu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_mu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_mu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_mu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_mu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_mu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_mu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu(vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu(vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu(vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tu(vint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tu(vint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tu(vint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu(vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu(vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu(vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu(vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu(vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu(vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu(vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tu(vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tu(vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tu(vbfloat16m1_t vd, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tu(vbfloat16m2_t vd, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tu(vbfloat16m4_t vd, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tum(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tum(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tum(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu(vbool16_t vm,
+ vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tumu(vbool64_t vm,
+ vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tumu(vbool32_t vm,
+ vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_mu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_mu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_mu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rod.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rod.c
new file mode 100644
index 0000000..8406684
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rod.c
@@ -0,0 +1,233 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tu(vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tu(vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tu(vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tum(vbool64_t vm,
+ vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tum(vbool32_t vm,
+ vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tumu(vbool64_t vm,
+ vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tumu(vbool32_t vm,
+ vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m4_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rtz.c
new file mode 100644
index 0000000..4644eff
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rtz.c
@@ -0,0 +1,572 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu(vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu(vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu(vint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu(vint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu(vint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu(vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu(vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu(vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu(vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu(vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu(vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum(vbool16_t vm,
+ vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu(vbool16_t vm,
+ vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmacc.c
new file mode 100644
index 0000000..93fd6ba
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmacc.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmadd.c
new file mode 100644
index 0000000..d7e6b82
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmadd.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsac.c
new file mode 100644
index 0000000..e0c289d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsac.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsub.c
new file mode 100644
index 0000000..05ccda3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsub.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrec7.c
new file mode 100644
index 0000000..3123692
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrec7.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf4_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf2_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m1_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m2_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m4_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m8_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf4_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf2_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m1_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m2_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m4_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m8_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf4_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf2_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m1_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m2_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m4_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m8_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf4_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf2_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m1_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m2_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m4_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m8_mu(mask, maskedoff, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsqrt7.c
new file mode 100644
index 0000000..8436f0e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsqrt7.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf4_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf2_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m1_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m2_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m4_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m8_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf4_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf2_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m1_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m2_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m4_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m8_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf4_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf2_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m1_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m2_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m4_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m8_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf4_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf2_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m1_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m2_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m4_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m8_mu(mask, maskedoff, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsub.c
new file mode 100644
index 0000000..7dd2bb6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnj.c
new file mode 100644
index 0000000..b39a0be
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnj.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjn.c
new file mode 100644
index 0000000..7542e78
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjn.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjx.c
new file mode 100644
index 0000000..104149e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjx.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1down.c
new file mode 100644
index 0000000..228dc1cd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1down.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf4_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf2_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m1_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m2_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m4_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m8_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf4_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf2_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m1_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m2_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m4_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m8_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf4_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf2_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m1_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m2_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m4_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m8_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf4_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf2_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m1_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m2_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m4_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m8_mu(mask, maskedoff, src, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1up.c
new file mode 100644
index 0000000..9e6ff2b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1up.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf4_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf2_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf4_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf2_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf4_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf2_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf4_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf2_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8_mu(mask, maskedoff, src, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsub.c
new file mode 100644
index 0000000..b6fd94e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsub.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwadd.c
new file mode 100644
index 0000000..4bee376
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwadd.c
@@ -0,0 +1,2007 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwcvt.c
new file mode 100644
index 0000000..9151319
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwcvt.c
@@ -0,0 +1,765 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tu(vbfloat16mf4_t vd, vint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tu(vbfloat16mf2_t vd, vint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tu(vbfloat16m1_t vd, vint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tu(vbfloat16m2_t vd, vint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tu(vbfloat16m4_t vd, vint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tu(vbfloat16m8_t vd, vint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tu(vbfloat16mf4_t vd, vuint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tu(vbfloat16mf2_t vd, vuint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tu(vbfloat16m1_t vd, vuint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tu(vbfloat16m2_t vd, vuint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tu(vbfloat16m4_t vd, vuint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tu(vbfloat16m8_t vd, vuint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd,
+ vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd,
+ vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd,
+ vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd,
+ vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd,
+ vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd,
+ vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmacc.c
new file mode 100644
index 0000000..f67b100
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmacc.c
@@ -0,0 +1,1017 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmsac.c
new file mode 100644
index 0000000..6d78c74
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmsac.c
@@ -0,0 +1,1017 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmul.c
new file mode 100644
index 0000000..9fcfe81
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmul.c
@@ -0,0 +1,1015 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmacc.c
new file mode 100644
index 0000000..73cc822
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmacc.c
@@ -0,0 +1,1034 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmsac.c
new file mode 100644
index 0000000..6133230
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmsac.c
@@ -0,0 +1,1034 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwsub.c
new file mode 100644
index 0000000..9d9b0b0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwsub.c
@@ -0,0 +1,2007 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfeq.c
new file mode 100644
index 0000000..b96aae5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfeq.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfge.c
new file mode 100644
index 0000000..47d0427
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfge.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfgt.c
new file mode 100644
index 0000000..0a0ead2
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfgt.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfle.c
new file mode 100644
index 0000000..27ddefe
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfle.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmflt.c
new file mode 100644
index 0000000..d5f4f77
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmflt.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfne.c
new file mode 100644
index 0000000..c2df947
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfne.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfadd.c
new file mode 100644
index 0000000..2bd3b39
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfadd.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfclass.c
new file mode 100644
index 0000000..e2a993a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfclass.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tu(vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tu(vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_tu(vuint16m1_t vd, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_tu(vuint16m2_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_tu(vuint16m4_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_tu(vuint16m8_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_tum(vbool8_t vm, vuint16m2_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_tum(vbool4_t vm, vuint16m4_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_tum(vbool2_t vm, vuint16m8_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_tumu(vbool8_t vm, vuint16m2_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_tumu(vbool4_t vm, vuint16m4_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_tumu(vbool2_t vm, vuint16m8_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_mu(vbool8_t vm, vuint16m2_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_mu(vbool4_t vm, vuint16m4_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_mu(vbool2_t vm, vuint16m8_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmacc.c
new file mode 100644
index 0000000..eb74271
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmacc.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmadd.c
new file mode 100644
index 0000000..68d490d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmadd.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmax.c
new file mode 100644
index 0000000..5f682e8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmax.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmerge.c
new file mode 100644
index 0000000..9593ad5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmerge.c
@@ -0,0 +1,69 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmerge_vfm_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmerge.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmerge_vfm_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, vbool64_t mask, size_t vl) {
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmerge_vfm_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmerge.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmerge_vfm_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, vbool32_t mask, size_t vl) {
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmerge_vfm_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmerge.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmerge_vfm_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, vbool16_t mask, size_t vl) {
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmerge_vfm_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmerge.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmerge_vfm_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, vbool8_t mask, size_t vl) {
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmerge_vfm_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmerge.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmerge_vfm_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, vbool4_t mask, size_t vl) {
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmerge_vfm_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmerge.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmerge_vfm_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, vbool2_t mask, size_t vl) {
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmin.c
new file mode 100644
index 0000000..f3ef3c3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmin.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsac.c
new file mode 100644
index 0000000..0587c57
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsac.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsub.c
new file mode 100644
index 0000000..2ad26f8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsub.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmul.c
new file mode 100644
index 0000000..d1e726a9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmul.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmv.c
new file mode 100644
index 0000000..9fd1ffc
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmv.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmv_v_f_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.v.f.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmv_v_f_bf16mf4_tu(vbfloat16mf4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmv_v_f_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.v.f.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmv_v_f_bf16mf2_tu(vbfloat16mf2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmv_v_f_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.v.f.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmv_v_f_bf16m1_tu(vbfloat16m1_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmv_v_f_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.v.f.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmv_v_f_bf16m2_tu(vbfloat16m2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmv_v_f_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.v.f.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmv_v_f_bf16m4_tu(vbfloat16m4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmv_v_f_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.v.f.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmv_v_f_bf16m8_tu(vbfloat16m8_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmv_s_f_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.s.f.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmv_s_f_bf16mf4_tu(vbfloat16mf4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmv_s_f_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.s.f.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmv_s_f_bf16mf2_tu(vbfloat16mf2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmv_s_f_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.s.f.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmv_s_f_bf16m1_tu(vbfloat16m1_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmv_s_f_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.s.f.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmv_s_f_bf16m2_tu(vbfloat16m2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmv_s_f_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.s.f.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmv_s_f_bf16m4_tu(vbfloat16m4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmv_s_f_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.s.f.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmv_s_f_bf16m8_tu(vbfloat16m8_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt.c
new file mode 100644
index 0000000..c6cd0a5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt.c
@@ -0,0 +1,1539 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tu(vint8mf8_t vd, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tu(vint8mf4_t vd, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tu(vint8mf2_t vd, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tu(vint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tu(vint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tu(vint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tu(vuint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tu(vuint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tu(vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tu(vuint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tu(vuint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tu(vuint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tu(vbfloat16m1_t vd, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tu(vbfloat16m2_t vd, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tu(vbfloat16m4_t vd, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tum(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tum(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tum(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tum(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tum(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tum(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tum(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tum(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tum(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tumu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tumu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tumu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tumu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tumu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tumu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_mu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_mu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_mu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_mu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_mu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_mu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_mu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_mu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_mu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu(vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu(vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu(vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tu(vint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tu(vint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tu(vint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu(vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu(vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu(vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu(vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu(vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu(vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tu(vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tu(vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tu(vbfloat16m1_t vd, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tu(vbfloat16m2_t vd, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tu(vbfloat16m4_t vd, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tum(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tum(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tum(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu(vbool16_t vm,
+ vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tumu(vbool64_t vm,
+ vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tumu(vbool32_t vm,
+ vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_mu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_mu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_mu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rod.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rod.c
new file mode 100644
index 0000000..0745633
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rod.c
@@ -0,0 +1,233 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tu(vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tu(vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tu(vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tum(vbool64_t vm,
+ vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tum(vbool32_t vm,
+ vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tumu(vbool64_t vm,
+ vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tumu(vbool32_t vm,
+ vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rtz.c
new file mode 100644
index 0000000..b906c5f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rtz.c
@@ -0,0 +1,572 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu(vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu(vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu(vint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu(vint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu(vint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu(vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu(vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu(vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu(vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu(vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu(vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum(vbool16_t vm,
+ vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu(vbool16_t vm,
+ vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmacc.c
new file mode 100644
index 0000000..cc487b4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmacc.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmadd.c
new file mode 100644
index 0000000..f9c348b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmadd.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsac.c
new file mode 100644
index 0000000..83d35e8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsac.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsub.c
new file mode 100644
index 0000000..f5282a1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsub.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrec7.c
new file mode 100644
index 0000000..f8e5a33
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrec7.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsqrt7.c
new file mode 100644
index 0000000..7c6c926
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsqrt7.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsub.c
new file mode 100644
index 0000000..c09caeb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnj.c
new file mode 100644
index 0000000..c1f69932
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnj.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjn.c
new file mode 100644
index 0000000..1b799d8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjn.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjx.c
new file mode 100644
index 0000000..9c5f2af
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjx.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1down.c
new file mode 100644
index 0000000..691302e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1down.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1up.c
new file mode 100644
index 0000000..1238d22
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1up.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsub.c
new file mode 100644
index 0000000..ea4f8f0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsub.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwadd.c
new file mode 100644
index 0000000..e5b7b8d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwadd.c
@@ -0,0 +1,1932 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwcvt.c
new file mode 100644
index 0000000..7300104
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwcvt.c
@@ -0,0 +1,765 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tu(vbfloat16mf4_t vd, vint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tu(vbfloat16mf2_t vd, vint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tu(vbfloat16m1_t vd, vint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tu(vbfloat16m2_t vd, vint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tu(vbfloat16m4_t vd, vint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tu(vbfloat16m8_t vd, vint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tu(vbfloat16mf4_t vd, vuint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tu(vbfloat16mf2_t vd, vuint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tu(vbfloat16m1_t vd, vuint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tu(vbfloat16m2_t vd, vuint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tu(vbfloat16m4_t vd, vuint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tu(vbfloat16m8_t vd, vuint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd,
+ vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd,
+ vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd,
+ vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd,
+ vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd,
+ vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd,
+ vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmacc.c
new file mode 100644
index 0000000..b05f8802
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmacc.c
@@ -0,0 +1,977 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmsac.c
new file mode 100644
index 0000000..93721f6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmsac.c
@@ -0,0 +1,977 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmul.c
new file mode 100644
index 0000000..4a2b5e3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmul.c
@@ -0,0 +1,975 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmacc.c
new file mode 100644
index 0000000..57e43344
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmacc.c
@@ -0,0 +1,994 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmsac.c
new file mode 100644
index 0000000..42da060
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmsac.c
@@ -0,0 +1,994 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwsub.c
new file mode 100644
index 0000000..1378bc9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwsub.c
@@ -0,0 +1,1932 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfeq.c
new file mode 100644
index 0000000..3945f82
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfeq.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfge.c
new file mode 100644
index 0000000..82586da
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfge.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfgt.c
new file mode 100644
index 0000000..75ccbbc
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfgt.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfle.c
new file mode 100644
index 0000000..49ff1c9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfle.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmflt.c
new file mode 100644
index 0000000..24b3f9c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmflt.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfne.c
new file mode 100644
index 0000000..ca3e134
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfne.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/X86/avx2-builtins.c b/clang/test/CodeGen/X86/avx2-builtins.c
index b798618..a505d70 100644
--- a/clang/test/CodeGen/X86/avx2-builtins.c
+++ b/clang/test/CodeGen/X86/avx2-builtins.c
@@ -1038,6 +1038,7 @@ __m256i test_mm256_mulhrs_epi16(__m256i a, __m256i b) {
// CHECK: call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
return _mm256_mulhrs_epi16(a, b);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_mulhrs_epi16((__m256i)(__v16hi){+100, +200, -300, -400, +500, +600, -700, +800, -900, -1000, +1100, +1200, -1300, -1400, +1500, +1600}, (__m256i)(__v16hi){+1600, -1500, +1400, -1300, +1200, -1100, +1000, -900, +800, -700, +600, -500, +400, -300, +200, -100}), +5, -9, -13, +16, +18, -20, -21, -22, -22, +21, +20, -18, -16, +13, +9, -5));
__m256i test_mm256_mullo_epi16(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_mullo_epi16
diff --git a/clang/test/CodeGen/X86/avx512bw-builtins.c b/clang/test/CodeGen/X86/avx512bw-builtins.c
index fddf17d..55bf482 100644
--- a/clang/test/CodeGen/X86/avx512bw-builtins.c
+++ b/clang/test/CodeGen/X86/avx512bw-builtins.c
@@ -1596,18 +1596,24 @@ __m512i test_mm512_mulhrs_epi16(__m512i __A, __m512i __B) {
// CHECK: @llvm.x86.avx512.pmul.hr.sw.512
return _mm512_mulhrs_epi16(__A,__B);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_mulhrs_epi16((__m512i)(__v32hi){+100, +200, -300, -400, +500, +600, -700, +800, -900, -1000, +1100, +1200, -1300, -1400, +1500, +1600, -1700, -1800, +1900, +2000, -2100, -2200, +2300, +2400, -2500, -2600, +2700, +2800, -2900, -3000, +3100, +3200}, (__m512i)(__v32hi){+3200, -3100, +3000, -2900, +2800, -2700, +2600, -2500, +2400, -2300, +2200, -2100, +2000, -1900, +1800, -1700, +1600, -1500, +1400, -1300, +1200, -1100, +1000, -900, +800, -700, +600, -500, +400, -300, +200, -100}), +10, -19, -27, +35, +43, -49, -56, -61, -66, +70, +74, -77, -79, +81, +82, -83, -83, +82, +81, -79, -77, +74, +70, -66, -61, +56, +49, -43, -35, +27, +19, -10));
+
__m512i test_mm512_mask_mulhrs_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: test_mm512_mask_mulhrs_epi16
// CHECK: @llvm.x86.avx512.pmul.hr.sw.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_mulhrs_epi16(__W,__U,__A,__B);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_mask_mulhrs_epi16(_mm512_set1_epi16(1), 0x0000FFFF, (__m512i)(__v32hi){+100, +200, -300, -400, +500, +600, -700, +800, -900, -1000, +1100, +1200, -1300, -1400, +1500, +1600, -1700, -1800, +1900, +2000, -2100, -2200, +2300, +2400, -2500, -2600, +2700, +2800, -2900, -3000, +3100, +3200}, (__m512i)(__v32hi){+3200, -3100, +3000, -2900, +2800, -2700, +2600, -2500, +2400, -2300, +2200, -2100, +2000, -1900, +1800, -1700, +1600, -1500, +1400, -1300, +1200, -1100, +1000, -900, +800, -700, +600, -500, +400, -300, +200, -100}), +10, -19, -27, +35, +43, -49, -56, -61, -66, +70, +74, -77, -79, +81, +82, -83, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1));
+
__m512i test_mm512_maskz_mulhrs_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: test_mm512_maskz_mulhrs_epi16
// CHECK: @llvm.x86.avx512.pmul.hr.sw.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_mulhrs_epi16(__U,__A,__B);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_maskz_mulhrs_epi16(0x0000FFFF, (__m512i)(__v32hi){+100, +200, -300, -400, +500, +600, -700, +800, -900, -1000, +1100, +1200, -1300, -1400, +1500, +1600, -1700, -1800, +1900, +2000, -2100, -2200, +2300, +2400, -2500, -2600, +2700, +2800, -2900, -3000, +3100, +3200}, (__m512i)(__v32hi){+3200, -3100, +3000, -2900, +2800, -2700, +2600, -2500, +2400, -2300, +2200, -2100, +2000, -1900, +1800, -1700, +1600, -1500, +1400, -1300, +1200, -1100, +1000, -900, +800, -700, +600, -500, +400, -300, +200, -100}), +10, -19, -27, +35, +43, -49, -56, -61, -66, +70, +74, -77, -79, +81, +82, -83, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
+
__m512i test_mm512_mulhi_epi16(__m512i __A, __m512i __B) {
// CHECK-LABEL: test_mm512_mulhi_epi16
// CHECK: @llvm.x86.avx512.pmulh.w.512
diff --git a/clang/test/CodeGen/X86/avx512vlbw-builtins.c b/clang/test/CodeGen/X86/avx512vlbw-builtins.c
index d569283..95e4d40 100644
--- a/clang/test/CodeGen/X86/avx512vlbw-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vlbw-builtins.c
@@ -2061,6 +2061,7 @@ __m128i test_mm_mask_mulhrs_epi16(__m128i __W, __mmask8 __U, __m128i __X, __m128
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_mulhrs_epi16(__W, __U, __X, __Y);
}
+TEST_CONSTEXPR(match_v8hi(_mm_mask_mulhrs_epi16(_mm_set1_epi16(1), 0x0F, (__m128i)(__v8hi){+100, +200, -300, -400, +500, +600, -700, +800}, (__m128i)(__v8hi){+8000, -7000, +6000, -5000, +4000, -3000, +2000, -1000}), +24, -43, -55, +61, +1, +1, +1, +1));
__m128i test_mm_maskz_mulhrs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) {
// CHECK-LABEL: test_mm_maskz_mulhrs_epi16
@@ -2068,6 +2069,7 @@ __m128i test_mm_maskz_mulhrs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) {
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_mulhrs_epi16(__U, __X, __Y);
}
+TEST_CONSTEXPR(match_v8hi(_mm_maskz_mulhrs_epi16(0x0F, (__m128i)(__v8hi){+100, +200, -300, -400, +500, +600, -700, +800}, (__m128i)(__v8hi){+8000, -7000, +6000, -5000, +4000, -3000, +2000, -1000}), +24, -43, -55, +61, 0, 0, 0, 0));
__m256i test_mm256_mask_mulhrs_epi16(__m256i __W, __mmask16 __U, __m256i __X, __m256i __Y) {
// CHECK-LABEL: test_mm256_mask_mulhrs_epi16
@@ -2075,6 +2077,7 @@ __m256i test_mm256_mask_mulhrs_epi16(__m256i __W, __mmask16 __U, __m256i __X, __
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_mulhrs_epi16(__W, __U, __X, __Y);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_mask_mulhrs_epi16(_mm256_set1_epi16(1), 0xF00F, (__m256i)(__v16hi){+100, +200, -300, -400, +500, +600, -700, +800, -900, -1000, +1100, +1200, -1300, -1400, +1500, +1600}, (__m256i)(__v16hi){+1600, -1500, +1400, -1300, +1200, -1100, +1000, -900, +800, -700, +600, -500, +400, -300, +200, -100}), +5, -9, -13, +16, +1, +1, +1, +1, +1, +1, +1, +1, -16, +13, +9, -5));
__m256i test_mm256_maskz_mulhrs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) {
// CHECK-LABEL: test_mm256_maskz_mulhrs_epi16
@@ -2082,6 +2085,7 @@ __m256i test_mm256_maskz_mulhrs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) {
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_mulhrs_epi16(__U, __X, __Y);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_maskz_mulhrs_epi16(0xF00F, (__m256i)(__v16hi){+100, +200, -300, -400, +500, +600, -700, +800, -900, -1000, +1100, +1200, -1300, -1400, +1500, +1600}, (__m256i)(__v16hi){+1600, -1500, +1400, -1300, +1200, -1100, +1000, -900, +800, -700, +600, -500, +400, -300, +200, -100}), +5, -9, -13, +16, 0, 0, 0, 0, 0, 0, 0, 0, -16, +13, +9, -5));
__m128i test_mm_mask_mulhi_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
// CHECK-LABEL: test_mm_mask_mulhi_epu16
diff --git a/clang/test/CodeGen/X86/mmx-builtins.c b/clang/test/CodeGen/X86/mmx-builtins.c
index d9041d4..c1ac57b 100644
--- a/clang/test/CodeGen/X86/mmx-builtins.c
+++ b/clang/test/CodeGen/X86/mmx-builtins.c
@@ -438,6 +438,7 @@ __m64 test_mm_mulhrs_pi16(__m64 a, __m64 b) {
// CHECK: call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(
return _mm_mulhrs_pi16(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_mulhrs_pi16((__m64)(__v4hi){+100, +200, -300, -400}, (__m64)(__v4hi){+30000, -20000, +10000, -5000}), +92, -122, -92, +61));
__m64 test_mm_mullo_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_mullo_pi16
diff --git a/clang/test/CodeGen/X86/ssse3-builtins.c b/clang/test/CodeGen/X86/ssse3-builtins.c
index 32abd9d..f70afc0 100644
--- a/clang/test/CodeGen/X86/ssse3-builtins.c
+++ b/clang/test/CodeGen/X86/ssse3-builtins.c
@@ -110,6 +110,7 @@ __m128i test_mm_mulhrs_epi16(__m128i a, __m128i b) {
// CHECK: call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
return _mm_mulhrs_epi16(a, b);
}
+TEST_CONSTEXPR(match_v8hi(_mm_mulhrs_epi16((__m128i)(__v8hi){+100, +200, -300, -400, +500, +600, -700, +800}, (__m128i)(__v8hi){+8000, -7000, +6000, -5000, +4000, -3000, +2000, -1000}), +24, -43, -55, +61, +61, -55, -43, -24));
__m128i test_mm_shuffle_epi8(__m128i a, __m128i b) {
// CHECK-LABEL: test_mm_shuffle_epi8
diff --git a/clang/test/CodeGenCUDA/Inputs/cuda.h b/clang/test/CodeGenCUDA/Inputs/cuda.h
index dc85eae..e7ad784 100644
--- a/clang/test/CodeGenCUDA/Inputs/cuda.h
+++ b/clang/test/CodeGenCUDA/Inputs/cuda.h
@@ -13,6 +13,8 @@
#endif
#define __launch_bounds__(...) __attribute__((launch_bounds(__VA_ARGS__)))
#define __grid_constant__ __attribute__((grid_constant))
+#define __cluster_dims__(...) __attribute__((cluster_dims(__VA_ARGS__)))
+#define __no_cluster__ __attribute__((no_cluster))
#else
#define __constant__
#define __device__
@@ -22,6 +24,8 @@
#define __managed__
#define __launch_bounds__(...)
#define __grid_constant__
+#define __cluster_dims__(...)
+#define __no_cluster__
#endif
struct dim3 {
diff --git a/clang/test/CodeGenCUDA/cluster_dims.cu b/clang/test/CodeGenCUDA/cluster_dims.cu
new file mode 100644
index 0000000..00635e3
--- /dev/null
+++ b/clang/test/CodeGenCUDA/cluster_dims.cu
@@ -0,0 +1,38 @@
+// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx1250 -fcuda-is-device -emit-llvm -x hip -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -aux-triple amdgcn-amd-amdhsa -emit-llvm -x hip -o - %s | FileCheck --check-prefix=HOST %s
+
+#include "Inputs/cuda.h"
+
+const int constint = 4;
+
+// HOST-NOT: "amdgpu-cluster-dims"
+
+// CHECK: "amdgpu-cluster-dims"="2,2,2"
+__global__ void __cluster_dims__(2, 2, 2) test_literal_3d() {}
+
+// CHECK: "amdgpu-cluster-dims"="2,2,1"
+__global__ void __cluster_dims__(2, 2) test_literal_2d() {}
+
+// CHECK: "amdgpu-cluster-dims"="4,1,1"
+__global__ void __cluster_dims__(4) test_literal_1d() {}
+
+// CHECK: "amdgpu-cluster-dims"="4,2,1"
+__global__ void __cluster_dims__(constint, constint / 2, 1) test_constant() {}
+
+// CHECK: "amdgpu-cluster-dims"="0,0,0"
+__global__ void __no_cluster__ test_no_cluster() {}
+
+// CHECK: "amdgpu-cluster-dims"="7,1,1"
+template<unsigned a>
+__global__ void __cluster_dims__(a) test_template_1d() {}
+template __global__ void test_template_1d<7>();
+
+// CHECK: "amdgpu-cluster-dims"="2,6,1"
+template<unsigned a, unsigned b>
+__global__ void __cluster_dims__(a, b) test_template_2d() {}
+template __global__ void test_template_2d<2, 6>();
+
+// CHECK: "amdgpu-cluster-dims"="1,2,3"
+template<unsigned a, unsigned b, unsigned c>
+__global__ void __cluster_dims__(a, b, c) test_template_3d() {}
+template __global__ void test_template_3d<1, 2, 3>();
diff --git a/clang/test/CodeGenHLSL/Operators/logical-not.hlsl b/clang/test/CodeGenHLSL/Operators/logical-not.hlsl
new file mode 100644
index 0000000..0f9d067
--- /dev/null
+++ b/clang/test/CodeGenHLSL/Operators/logical-not.hlsl
@@ -0,0 +1,33 @@
+// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.6-library -disable-llvm-passes -emit-llvm -finclude-default-header -fnative-half-type -o - %s | FileCheck %s
+
+// CHECK-LABEL: case1
+// CHECK: [[ToBool:%.*]] = icmp ne <2 x i32> {{.*}}, zeroinitializer
+// CHECK-NEXT: [[BoolCmp:%.*]] = icmp eq <2 x i1> [[ToBool]], zeroinitializer
+// CHECK-NEXT: {{.*}} = zext <2 x i1> [[BoolCmp]] to <2 x i32>
+export uint32_t2 case1(uint32_t2 b) {
+ return !b;
+}
+
+// CHECK-LABEL: case2
+// CHECK: [[ToBool:%.*]] = icmp ne <3 x i32> {{.*}}, zeroinitializer
+// CHECK-NEXT: [[BoolCmp:%.*]] = icmp eq <3 x i1> [[ToBool]], zeroinitializer
+// CHECK-NEXT: {{.*}} = zext <3 x i1> [[BoolCmp]] to <3 x i32>
+export int32_t3 case2(int32_t3 b) {
+ return !b;
+}
+
+// CHECK-LABEL: case3
+// CHECK: [[ToBool:%.*]] = fcmp reassoc nnan ninf nsz arcp afn une half {{.*}}, 0xH0000
+// CHECK-NEXT: [[BoolCmp:%.*]] = xor i1 [[ToBool]], true
+// CHECK-NEXT: {{.*}} = uitofp i1 [[BoolCmp]] to half
+export float16_t case3(float16_t b) {
+ return !b;
+}
+
+// CHECK-LABEL: case4
+// CHECK: [[ToBool:%.*]] = fcmp reassoc nnan ninf nsz arcp afn une <4 x float> {{.*}}, zeroinitializer
+// CHECK-NEXT: [[BoolCmp:%.*]] = icmp eq <4 x i1> [[ToBool]], zeroinitializer
+// CHECK-NEXT: {{.*}} = uitofp <4 x i1> [[BoolCmp]] to <4 x float>
+export float4 case4(float4 b) {
+ return !b;
+}
diff --git a/clang/test/CodeGenHLSL/resources/ByteAddressBuffers-methods.hlsl b/clang/test/CodeGenHLSL/resources/ByteAddressBuffers-methods.hlsl
new file mode 100644
index 0000000..9dd0228
--- /dev/null
+++ b/clang/test/CodeGenHLSL/resources/ByteAddressBuffers-methods.hlsl
@@ -0,0 +1,45 @@
+// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.3-library -finclude-default-header -emit-llvm -disable-llvm-passes -o - %s | llvm-cxxfilt | FileCheck %s --check-prefixes=CHECK,DXIL
+// RUN-DISABLED: %clang_cc1 -triple spirv-vulkan-library -finclude-default-header -emit-llvm -disable-llvm-passes -o - %s | llvm-cxxfilt | FileCheck %s --check-prefixes=CHECK,SPIRV
+
+// NOTE: SPIRV codegen for resource methods is not yet implemented
+
+ByteAddressBuffer Buf : register(t0);
+RWByteAddressBuffer RWBuf : register(u0);
+
+// DXIL: %"class.hlsl::ByteAddressBuffer" = type { target("dx.RawBuffer", i8, 0, 0) }
+// DXIL: %"class.hlsl::RWByteAddressBuffer" = type { target("dx.RawBuffer", i8, 1, 0) }
+
+// DXIL: @Buf = internal global %"class.hlsl::ByteAddressBuffer" poison
+// DXIL: @RWBuf = internal global %"class.hlsl::RWByteAddressBuffer" poison
+
+export uint TestGetDimensions() {
+ uint dim1, dim2;
+ Buf.GetDimensions(dim1);
+ RWBuf.GetDimensions(dim2);
+ return dim1 + dim2;
+}
+
+// CHECK: define {{.*}} @TestGetDimensions()()
+// CHECK: call void @hlsl::ByteAddressBuffer::GetDimensions(unsigned int&)(ptr {{.*}} @Buf, ptr{{.*}})
+// CHECK: call void @hlsl::RWByteAddressBuffer::GetDimensions(unsigned int&)(ptr{{.*}} @RWBuf, ptr{{.*}})
+// CHECK: add
+// CHECK: ret
+
+// CHECK: define {{.*}} void @hlsl::ByteAddressBuffer::GetDimensions(unsigned int&)(ptr {{.*}} %this, {{.*}} %dim)
+// CHECK: %[[HANDLE_PTR:.*]] = getelementptr inbounds nuw %"class.hlsl::ByteAddressBuffer", ptr %{{.*}}, i32 0, i32 0
+// CHECK-NEXT: %[[HANDLE:.*]] = load target("dx.RawBuffer", i8, 0, 0), ptr %[[HANDLE_PTR]]
+// CHECK-NEXT: %[[DIMPTR:.*]] = load ptr, ptr %dim.addr
+// DXIL-NEXT: %[[DIM:.*]] = call i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_i8_0_0t(target("dx.RawBuffer", i8, 0, 0) %[[HANDLE]])
+// CHECK-NEXT: store i32 %[[DIM]], ptr %[[DIMPTR]]
+// CHECK-NEXT: ret void
+
+// CHECK: define {{.*}} void @hlsl::RWByteAddressBuffer::GetDimensions(unsigned int&)(ptr {{.*}} %this, ptr noalias {{.*}} %dim)
+// CHECK: %[[HANDLE_PTR:.*]] = getelementptr inbounds nuw %"class.hlsl::RWByteAddressBuffer", ptr %{{.*}}, i32 0, i32 0
+// CHECK-NEXT: %[[HANDLE:.*]] = load target("dx.RawBuffer", i8, 1, 0), ptr %[[HANDLE_PTR]]
+// CHECK-NEXT: %[[DIMPTR:.*]] = load ptr, ptr %dim.addr
+// DXIL-NEXT: %[[DIM:.*]] = call i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_i8_1_0t(target("dx.RawBuffer", i8, 1, 0) %[[HANDLE]])
+// CHECK-NEXT: store i32 %[[DIM]], ptr %[[DIMPTR]]
+// CHECK-NEXT: ret void
+
+// DXIL: declare i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_i8_0_0t(target("dx.RawBuffer", i8, 0, 0))
+// DXIL: declare i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_i8_1_0t(target("dx.RawBuffer", i8, 1, 0))
diff --git a/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-lib.hlsl b/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-lib.hlsl
index 2cf6a10..1f248d0 100644
--- a/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-lib.hlsl
+++ b/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-lib.hlsl
@@ -104,9 +104,57 @@ export float TestLoad() {
// CHECK-NEXT: %[[VAL:.*]] = load float, ptr %[[PTR]]
// CHECK-NEXT: ret float %[[VAL]]
+export uint TestGetDimensions() {
+ uint dim1, dim2, dim3, stride1, stride2, stride3;
+ SB1.GetDimensions(dim1, stride1);
+ RWSB2.GetDimensions(dim2, stride2);
+ CSB.GetDimensions(dim3, stride3);
+ return dim1 + dim2 + dim3 + stride1 + stride2 + stride3;
+}
+// CHECK: define noundef i32 @TestGetDimensions()()
+// CHECK: call void @hlsl::StructuredBuffer<float>::GetDimensions(unsigned int&, unsigned int&)(ptr {{.*}} @SB1, ptr {{.*}}, ptr {{.*}})
+// CHECK: call void @hlsl::RWStructuredBuffer<unsigned int vector[4]>::GetDimensions(unsigned int&, unsigned int&)(ptr {{.*}} @RWSB2, ptr {{.*}}, ptr {{.*}})
+// CHECK: call void @hlsl::ConsumeStructuredBuffer<double>::GetDimensions(unsigned int&, unsigned int&)(ptr {{.*}} @CSB, ptr {{.*}}, ptr {{.*}})
+// CHECK: add
+// CHECK: ret
+
+// CHECK: define {{.*}} void @hlsl::StructuredBuffer<float>::GetDimensions(unsigned int&, unsigned int&)(ptr {{.*}}, ptr {{.*}} %numStructs, ptr {{.*}} %stride)
+// CHECK: %__handle = getelementptr inbounds nuw %"class.hlsl::StructuredBuffer", ptr %{{.*}}, i32 0, i32 0
+// CHECK-NEXT: %[[HANDLE:.*]] = load target("dx.RawBuffer", float, 0, 0), ptr %__handle
+// CHECK-NEXT: %[[NUMSTRUCTS_PTR:.*]] = load ptr, ptr %numStructs.addr
+// DXIL-NEXT: %[[NUMSTRUCTS:.*]] = call i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_f32_0_0t(target("dx.RawBuffer", float, 0, 0) %[[HANDLE]])
+// CHECK-NEXT: store i32 %[[NUMSTRUCTS]], ptr %[[NUMSTRUCTS_PTR]]
+// CHECK-NEXT: %[[STRIDEPTR:.*]] = load ptr, ptr %stride.addr
+// CHECK-NEXT: store i32 4, ptr %[[STRIDEPTR]]
+// CHECK-NEXT: ret void
+
+// CHECK: define {{.*}} void @hlsl::RWStructuredBuffer<unsigned int vector[4]>::GetDimensions(unsigned int&, unsigned int&)(ptr {{.*}} %this, {{.*}} %numStructs, {{.*}} %stride)
+// CHECK: %__handle = getelementptr inbounds nuw %"class.hlsl::RWStructuredBuffer.0", ptr %{{.*}}, i32 0, i32 0
+// CHECK-NEXT: %[[HANDLE:.*]] = load target("dx.RawBuffer", <4 x i32>, 1, 0), ptr %__handle
+// CHECK-NEXT: %[[NUMSTRUCTS_PTR:.*]] = load ptr, ptr %numStructs.addr
+// DXIL-NEXT: %[[NUMSTRUCTS:.*]] = call i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_v4i32_1_0t(target("dx.RawBuffer", <4 x i32>, 1, 0) %[[HANDLE]])
+// CHECK-NEXT: store i32 %[[NUMSTRUCTS]], ptr %[[NUMSTRUCTS_PTR]]
+// CHECK-NEXT: %[[STRIDEPTR:.*]] = load ptr, ptr %stride.addr
+// CHECK-NEXT: store i32 16, ptr %[[STRIDEPTR]]
+// CHECK-NEXT: ret void
+
+// CHECK: define {{.*}} void @hlsl::ConsumeStructuredBuffer<double>::GetDimensions(unsigned int&, unsigned int&)(ptr {{.*}} %this, {{.*}} %numStructs, {{.*}} %stride)
+// CHECK: %__handle = getelementptr inbounds nuw %"class.hlsl::ConsumeStructuredBuffer", ptr %{{.*}}, i32 0, i32 0
+// CHECK-NEXT: %[[HANDLE:.*]] = load target("dx.RawBuffer", double, 1, 0), ptr %__handle
+// CHECK-NEXT: %[[NUMSTRUCTS_PTR:.*]] = load ptr, ptr %numStructs.addr
+// DXIL-NEXT: %[[NUMSTRUCTS:.*]] = call i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_f64_1_0t(target("dx.RawBuffer", double, 1, 0) %[[HANDLE]])
+// CHECK-NEXT: store i32 %[[NUMSTRUCTS]], ptr %[[NUMSTRUCTS_PTR]]
+// CHECK-NEXT: %[[STRIDEPTR:.*]] = load ptr, ptr %stride.addr
+// CHECK-NEXT: store i32 8, ptr %[[STRIDEPTR]]
+// CHECK-NEXT: ret void
+
// DXIL: declare i32 @llvm.dx.resource.updatecounter.tdx.RawBuffer_f32_1_0t(target("dx.RawBuffer", float, 1, 0), i8)
// DXIL: declare i32 @llvm.dx.resource.updatecounter.tdx.RawBuffer_v4i32_1_0t(target("dx.RawBuffer", <4 x i32>, 1, 0), i8)
// DXIL: declare ptr @llvm.dx.resource.getpointer.p0.tdx.RawBuffer_f32_1_0t(target("dx.RawBuffer", float, 1, 0), i32)
// DXIL: declare i32 @llvm.dx.resource.updatecounter.tdx.RawBuffer_f64_1_0t(target("dx.RawBuffer", double, 1, 0), i8)
// DXIL: declare ptr @llvm.dx.resource.getpointer.p0.tdx.RawBuffer_f64_1_0t(target("dx.RawBuffer", double, 1, 0), i32)
// DXIL: declare ptr @llvm.dx.resource.getpointer.p0.tdx.RawBuffer_f32_0_0t(target("dx.RawBuffer", float, 0, 0), i32)
+
+// DXIL: declare i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_f32_0_0t(target("dx.RawBuffer", float, 0, 0))
+// DXIL: declare i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_v4i32_1_0t(target("dx.RawBuffer", <4 x i32>, 1, 0))
+// DXIL: declare i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_f64_1_0t(target("dx.RawBuffer", double, 1, 0))
diff --git a/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-ps.hlsl b/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-ps.hlsl
index 47c1d0d..25fa759 100644
--- a/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-ps.hlsl
+++ b/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-ps.hlsl
@@ -65,7 +65,42 @@ export float TestLoad() {
// CHECK-NEXT: %[[VAL:.*]] = load <2 x i32>, ptr %[[BUFPTR]]
// CHECK-NEXT: ret <2 x i32> %[[VAL]]
+export uint TestGetDimensions() {
+ uint dim1, dim2, stride1, stride2;
+ ROSB1.GetDimensions(dim1, stride1);
+ ROSB2.GetDimensions(dim2, stride2);
+ return dim1 + dim2 + stride1 + stride2;
+}
+// CHECK: define noundef i32 @TestGetDimensions()()
+// CHECK: call void @hlsl::RasterizerOrderedStructuredBuffer<float>::GetDimensions(unsigned int&, unsigned int&)(ptr {{.*}} @ROSB1, ptr {{.*}}, ptr {{.*}})
+// CHECK: call void @hlsl::RasterizerOrderedStructuredBuffer<int vector[2]>::GetDimensions(unsigned int&, unsigned int&)(ptr {{.*}} @ROSB2, ptr {{.*}}, ptr {{.*}})
+// CHECK: add
+// CHECK: ret
+
+// CHECK: define {{.*}} void @hlsl::RasterizerOrderedStructuredBuffer<float>::GetDimensions(unsigned int&, unsigned int&)(ptr {{.*}}, ptr {{.*}} %numStructs, ptr {{.*}} %stride)
+// CHECK: %__handle = getelementptr inbounds nuw %"class.hlsl::RasterizerOrderedStructuredBuffer", ptr %{{.*}}, i32 0, i32 0
+// DXIL-NEXT: %[[HANDLE:.*]] = load target("dx.RawBuffer", float, 1, 1), ptr %__handle
+// CHECK-NEXT: %[[NUMSTRUCTS_PTR:.*]] = load ptr, ptr %numStructs.addr
+// DXIL-NEXT: %[[NUMSTRUCTS:.*]] = call i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_f32_1_1t(target("dx.RawBuffer", float, 1, 1) %[[HANDLE]])
+// CHECK-NEXT: store i32 %[[NUMSTRUCTS]], ptr %[[NUMSTRUCTS_PTR]]
+// CHECK-NEXT: %[[STRIDEPTR:.*]] = load ptr, ptr %stride.addr
+// CHECK-NEXT: store i32 4, ptr %[[STRIDEPTR]]
+// CHECK-NEXT: ret void
+
+// CHECK: define {{.*}} void @hlsl::RasterizerOrderedStructuredBuffer<int vector[2]>::GetDimensions(unsigned int&, unsigned int&)(ptr {{.*}}, ptr {{.*}} %numStructs, ptr {{.*}} %stride)
+// CHECK: %__handle = getelementptr inbounds nuw %"class.hlsl::RasterizerOrderedStructuredBuffer.0", ptr %{{.*}}, i32 0, i32 0
+// DXIL-NEXT: %[[HANDLE:.*]] = load target("dx.RawBuffer", <2 x i32>, 1, 1), ptr %__handle
+// CHECK-NEXT: %[[NUMSTRUCTS_PTR:.*]] = load ptr, ptr %numStructs.addr
+// DXIL-NEXT: %[[NUMSTRUCTS:.*]] = call i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_v2i32_1_1t(target("dx.RawBuffer", <2 x i32>, 1, 1) %[[HANDLE]])
+// CHECK-NEXT: store i32 %[[NUMSTRUCTS]], ptr %[[NUMSTRUCTS_PTR]]
+// CHECK-NEXT: %[[STRIDEPTR:.*]] = load ptr, ptr %stride.addr
+// CHECK-NEXT: store i32 8, ptr %[[STRIDEPTR]]
+// CHECK-NEXT: ret void
+
// DXIL: declare i32 @llvm.dx.resource.updatecounter.tdx.RawBuffer_f32_1_1t(target("dx.RawBuffer", float, 1, 1), i8)
// DXIL: declare i32 @llvm.dx.resource.updatecounter.tdx.RawBuffer_v2i32_1_1t(target("dx.RawBuffer", <2 x i32>, 1, 1), i8)
// DXIL: declare ptr @llvm.dx.resource.getpointer.p0.tdx.RawBuffer_f32_1_1t(target("dx.RawBuffer", float, 1, 1), i32)
// DXIL: declare ptr @llvm.dx.resource.getpointer.p0.tdx.RawBuffer_v2i32_1_1t(target("dx.RawBuffer", <2 x i32>, 1, 1), i32)
+
+// DXIL: declare i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_f32_1_1t(target("dx.RawBuffer", float, 1, 1))
+// DXIL: declare i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_v2i32_1_1t(target("dx.RawBuffer", <2 x i32>, 1, 1))
diff --git a/clang/test/CodeGenHLSL/resources/TypedBuffers-methods.hlsl b/clang/test/CodeGenHLSL/resources/TypedBuffers-methods.hlsl
index b153bda..fdc1ef0 100644
--- a/clang/test/CodeGenHLSL/resources/TypedBuffers-methods.hlsl
+++ b/clang/test/CodeGenHLSL/resources/TypedBuffers-methods.hlsl
@@ -38,5 +38,37 @@ export float TestLoad() {
// CHECK-NEXT: %[[VEC:.*]] = load <4 x i32>, ptr %[[PTR]]
// CHECK-NEXT: ret <4 x i32> %[[VEC]]
+export uint TestGetDimensions() {
+ uint dim1, dim2;
+ Buf.GetDimensions(dim1);
+ RWBuf.GetDimensions(dim2);
+ return dim1 + dim2;
+}
+
+// CHECK: @TestGetDimensions()()
+// CHECK: call void @hlsl::Buffer<float>::GetDimensions(unsigned int&)(ptr {{.*}} @Buf, ptr {{.*}})
+// CHECK: call void @hlsl::RWBuffer<unsigned int vector[4]>::GetDimensions(unsigned int&)(ptr {{.*}} @RWBuf, ptr {{.*}})
+// CHECK: add
+// CHECK: ret
+
+// CHECK: define {{.*}} void @hlsl::Buffer<float>::GetDimensions(unsigned int&)(ptr {{.*}} %this, ptr noalias {{.*}} %dim)
+// CHECK: %[[HANDLE_PTR:.*]] = getelementptr inbounds nuw %"class.hlsl::Buffer", ptr %this1, i32 0, i32 0
+// CHECK-NEXT: %[[HANDLE:.*]] = load target("dx.TypedBuffer", float, 0, 0, 0), ptr %[[HANDLE_PTR]]
+// CHECK-NEXT: %[[DIMPTR:.*]] = load ptr, ptr %dim.addr
+// DXIL-NEXT: %[[DIM:.*]] = call i32 @llvm.dx.resource.getdimensions.x.tdx.TypedBuffer_f32_0_0_0t(target("dx.TypedBuffer", float, 0, 0, 0) %[[HANDLE]])
+// CHECK-NEXT: store i32 %[[DIM]], ptr %[[DIMPTR]]
+// CHECK-NEXT: ret void
+
+// CHECK: define {{.*}} void @hlsl::RWBuffer<unsigned int vector[4]>::GetDimensions(unsigned int&)(ptr {{.*}} %this, {{.*}} %dim)
+// CHECK: %[[HANDLE_PTR:.*]] = getelementptr inbounds nuw %"class.hlsl::RWBuffer", ptr %{{.*}}, i32 0, i32 0
+// CHECK-NEXT: %[[HANDLE:.*]] = load target("dx.TypedBuffer", <4 x i32>, 1, 0, 0), ptr %[[HANDLE_PTR]]
+// CHECK-NEXT: %[[DIMPTR:.*]] = load ptr, ptr %dim.addr
+// DXIL-NEXT: %[[DIM:.*]] = call i32 @llvm.dx.resource.getdimensions.x.tdx.TypedBuffer_v4i32_1_0_0t(target("dx.TypedBuffer", <4 x i32>, 1, 0, 0) %[[HANDLE]])
+// CHECK-NEXT: store i32 %[[DIM]], ptr %[[DIMPTR]]
+// CHECK-NEXT: ret void
+
// DXIL: declare ptr @llvm.dx.resource.getpointer.p0.tdx.TypedBuffer_f32_0_0_0t(target("dx.TypedBuffer", float, 0, 0, 0), i32)
// DXIL: declare ptr @llvm.dx.resource.getpointer.p0.tdx.TypedBuffer_v4i32_1_0_0t(target("dx.TypedBuffer", <4 x i32>, 1, 0, 0), i32)
+
+// DXIL: declare i32 @llvm.dx.resource.getdimensions.x.tdx.TypedBuffer_f32_0_0_0t(target("dx.TypedBuffer", float, 0, 0, 0))
+// DXIL: declare i32 @llvm.dx.resource.getdimensions.x.tdx.TypedBuffer_v4i32_1_0_0t(target("dx.TypedBuffer", <4 x i32>, 1, 0, 0))
diff --git a/clang/test/Driver/linker-wrapper.c b/clang/test/Driver/linker-wrapper.c
index 52a961d..39b9bcd 100644
--- a/clang/test/Driver/linker-wrapper.c
+++ b/clang/test/Driver/linker-wrapper.c
@@ -102,7 +102,7 @@ __attribute__((visibility("protected"), used)) int x;
// CUDA: clang{{.*}} -o [[IMG_SM70:.+]] -dumpdir a.out.nvptx64.sm_70.img. --target=nvptx64-nvidia-cuda -march=sm_70
// CUDA: clang{{.*}} -o [[IMG_SM52:.+]] -dumpdir a.out.nvptx64.sm_52.img. --target=nvptx64-nvidia-cuda -march=sm_52
-// CUDA: fatbinary{{.*}}-64 --create {{.*}}.fatbin --image=profile=sm_70,file=[[IMG_SM70]] --image=profile=sm_52,file=[[IMG_SM52]]
+// CUDA: fatbinary{{.*}}-64 --create {{.*}}.fatbin --image3=kind=elf,sm=70,file=[[IMG_SM70]] --image3=kind=elf,sm=52,file=[[IMG_SM52]]
// CUDA: usr/bin/ld{{.*}} {{.*}}.openmp.image.{{.*}}.o {{.*}}.cuda.image.{{.*}}.o
// RUN: llvm-offload-binary -o %t.out \
@@ -236,7 +236,7 @@ __attribute__((visibility("protected"), used)) int x;
// RUN: %t.o -o a.out 2>&1 | FileCheck %s --check-prefix=RELOCATABLE-LINK-CUDA
// RELOCATABLE-LINK-CUDA: clang{{.*}} -o {{.*}}.img -dumpdir a.out.nvptx64.sm_89.img. --target=nvptx64-nvidia-cuda
-// RELOCATABLE-LINK-CUDA: fatbinary{{.*}} -64 --create {{.*}}.fatbin --image=profile=sm_89,file={{.*}}.img
+// RELOCATABLE-LINK-CUDA: fatbinary{{.*}} -64 --create {{.*}}.fatbin --image3=kind=elf,sm=89,file={{.*}}.img
// RELOCATABLE-LINK-CUDA: /usr/bin/ld.lld{{.*}}-r
// RELOCATABLE-LINK-CUDA: llvm-objcopy{{.*}}a.out --remove-section .llvm.offloading
diff --git a/clang/test/Misc/pragma-attribute-supported-attributes-list.test b/clang/test/Misc/pragma-attribute-supported-attributes-list.test
index 73d4cb1..ab4153a 100644
--- a/clang/test/Misc/pragma-attribute-supported-attributes-list.test
+++ b/clang/test/Misc/pragma-attribute-supported-attributes-list.test
@@ -35,6 +35,7 @@
// CHECK-NEXT: CFUnknownTransfer (SubjectMatchRule_function)
// CHECK-NEXT: CPUDispatch (SubjectMatchRule_function)
// CHECK-NEXT: CPUSpecific (SubjectMatchRule_function)
+// CHECK-NEXT: CUDAClusterDims (SubjectMatchRule_objc_method, SubjectMatchRule_hasType_functionType)
// CHECK-NEXT: CUDAConstant (SubjectMatchRule_variable)
// CHECK-NEXT: CUDADevice (SubjectMatchRule_function, SubjectMatchRule_variable)
// CHECK-NEXT: CUDADeviceBuiltinSurfaceType (SubjectMatchRule_record)
@@ -43,6 +44,7 @@
// CHECK-NEXT: CUDAGridConstant (SubjectMatchRule_variable_is_parameter)
// CHECK-NEXT: CUDAHost (SubjectMatchRule_function)
// CHECK-NEXT: CUDALaunchBounds (SubjectMatchRule_objc_method, SubjectMatchRule_hasType_functionType)
+// CHECK-NEXT: CUDANoCluster (SubjectMatchRule_objc_method, SubjectMatchRule_hasType_functionType)
// CHECK-NEXT: CUDAShared (SubjectMatchRule_variable)
// CHECK-NEXT: CXX11NoReturn (SubjectMatchRule_function)
// CHECK-NEXT: CallableWhen (SubjectMatchRule_function_is_member)
diff --git a/clang/test/Parser/c2x-auto.c b/clang/test/Parser/c2x-auto.c
index b878a5b..7f80b07 100644
--- a/clang/test/Parser/c2x-auto.c
+++ b/clang/test/Parser/c2x-auto.c
@@ -130,3 +130,30 @@ void atomic(void) {
void attributes(void) {
auto ident [[clang::annotate("this works")]] = 12; // c17-error {{type specifier missing, defaults to 'int'; ISO C99 and later do not support implicit int}}
}
+
+/** GH163090 */
+constexpr auto int a1 = 0; // c23-error {{illegal storage class on file-scoped variable}} \
+ c23-error {{cannot combine with previous 'auto' declaration specifier}} \
+ c17-error {{illegal storage class on file-scoped variable}} \
+ c17-error {{unknown type name 'constexpr'}}
+
+constexpr int auto a2 = 0; // c23-error {{cannot combine with previous 'int' declaration specifier}} \
+ c17-error {{illegal storage class on file-scoped variable}} \
+ c17-error {{unknown type name 'constexpr'}}
+
+auto int b1 = 0; // c23-error {{illegal storage class on file-scoped variable}} \
+ c17-error {{illegal storage class on file-scoped variable}}
+
+int auto b2 = 0; // c23-error {{cannot combine with previous 'int' declaration specifier}} \
+ c17-error {{illegal storage class on file-scoped variable}}
+
+void f() {
+ constexpr auto int c1 = 0; // c23-error {{cannot combine with previous 'auto' declaration specifier}} \
+ c17-error {{use of undeclared identifier 'constexpr'}}
+
+ constexpr int auto c2 = 0; // c23-error {{cannot combine with previous 'int' declaration specifier}} \
+ c17-error {{use of undeclared identifier 'constexpr'}}
+
+ auto int d1 = 0;
+ int auto d2 = 0; // c23-error {{cannot combine with previous 'int' declaration specifier}}
+}
diff --git a/clang/test/Sema/attr-print.c b/clang/test/Sema/attr-print.c
index 8492356..211e61a 100644
--- a/clang/test/Sema/attr-print.c
+++ b/clang/test/Sema/attr-print.c
@@ -35,3 +35,6 @@ int * __sptr * __ptr32 ppsp32;
// CHECK: __attribute__((availability(macos, strict, introduced=10.6)));
void f6(int) __attribute__((availability(macosx,strict,introduced=10.6)));
+
+// CHECK: _libc_intl_domainname asm("__gi__libc_intl_domainname") __attribute__((visibility("hidden")));
+extern const char _libc_intl_domainname[]; extern typeof (_libc_intl_domainname) _libc_intl_domainname asm("__gi__libc_intl_domainname") __attribute__((visibility("hidden")));
diff --git a/clang/test/SemaCUDA/Inputs/cuda.h b/clang/test/SemaCUDA/Inputs/cuda.h
index 10db947..2bf45e03 100644
--- a/clang/test/SemaCUDA/Inputs/cuda.h
+++ b/clang/test/SemaCUDA/Inputs/cuda.h
@@ -13,6 +13,8 @@
#define __managed__ __attribute__((managed))
#define __grid_constant__ __attribute__((grid_constant))
#define __launch_bounds__(...) __attribute__((launch_bounds(__VA_ARGS__)))
+#define __cluster_dims__(...) __attribute__((cluster_dims(__VA_ARGS__)))
+#define __no_cluster__ __attribute__((no_cluster))
struct dim3 {
unsigned x, y, z;
diff --git a/clang/test/SemaCUDA/cluster_dims.cu b/clang/test/SemaCUDA/cluster_dims.cu
new file mode 100644
index 0000000..dcb8737
--- /dev/null
+++ b/clang/test/SemaCUDA/cluster_dims.cu
@@ -0,0 +1,64 @@
+// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -ast-print -x hip -verify=NS,all %s
+// RUN: %clang_cc1 -triple nvptx-nvidia-cuda -fcuda-is-device -ast-print -x hip -verify=NS,all %s
+// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx1250 -fcuda-is-device -ast-print -x hip -verify=amd,common,all %s | FileCheck -check-prefixes=CHECK %s
+// RUN: %clang_cc1 -triple nvptx-nvidia-cuda -target-cpu sm_90 -fcuda-is-device -ast-print -x hip -verify=cuda,common,all %s | FileCheck -check-prefixes=CHECK %s
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -aux-triple amdgcn-amd-amdhsa -ast-print -x hip -verify=amd,common,all %s | FileCheck -check-prefixes=CHECK %s
+
+#include "Inputs/cuda.h"
+
+const int constint = 4;
+
+// CHECK: __attribute__((global)) __attribute__((cluster_dims(2, 2, 2))) void test_literal_3d()
+__global__ void __cluster_dims__(2, 2, 2) test_literal_3d() {} //NS-error {{'cluster_dims' is not supported for this GPU architecture}}
+
+// CHECK: __attribute__((global)) __attribute__((cluster_dims(2, 2))) void test_literal_2d()
+__global__ void __cluster_dims__(2, 2) test_literal_2d() {} //NS-error {{'cluster_dims' is not supported for this GPU architecture}}
+
+// CHECK: __attribute__((global)) __attribute__((cluster_dims(4))) void test_literal_1d()
+__global__ void __cluster_dims__(4) test_literal_1d() {} //NS-error {{'cluster_dims' is not supported for this GPU architecture}}
+
+// CHECK: __attribute__((global)) __attribute__((cluster_dims(constint, constint / 4, 1))) void test_constant()
+__global__ void __cluster_dims__(constint, constint / 4, 1) test_constant() {} //NS-error {{'cluster_dims' is not supported for this GPU architecture}}
+
+// CHECK: template <int x, int y, int z> void test_template() __attribute__((cluster_dims(x, y, z)))
+template <int x, int y, int z> void test_template(void) __cluster_dims__(x, y, z){} //NS-error {{'cluster_dims' is not supported for this GPU architecture}}
+
+// CHECK: template <int x, int y, int z> void test_template_expr() __attribute__((cluster_dims(x + constint, y, z)))
+template <int x, int y, int z> void test_template_expr(void) __cluster_dims__(x + constint, y, z) {} //NS-error {{'cluster_dims' is not supported for this GPU architecture}}
+
+//NS-error@+1 {{'cluster_dims' is not supported for this GPU architecture}}
+__global__ void __cluster_dims__(32, 2, 4) test_too_large_dim_0() {} // common-error {{integer constant expression evaluates to value 32 that cannot be represented in a 4-bit unsigned integer type}}
+
+// cuda-error@+2 {{cluster does not support more than 8 thread blocks; 64 provided}}
+// amd-error@+1 {{cluster does not support more than 16 thread blocks; 64 provided}}
+__global__ void __cluster_dims__(4, 4, 4) test_too_large_dim_1() {} // NS-error {{'cluster_dims' is not supported for this GPU architecture}}
+
+// cuda-error@+3 {{cluster does not support more than 8 thread blocks; 64 provided}}
+// amd-error@+2 {{cluster does not support more than 16 thread blocks; 64 provided}}
+template<unsigned a, unsigned b, unsigned c>
+__global__ void __cluster_dims__(a, b, c) test_too_large_dim_template() {} // NS-error {{'cluster_dims' is not supported for this GPU architecture}}
+template __global__ void test_too_large_dim_template<4, 4, 4>(); // common-note {{in instantiation of function template specialization 'test_too_large_dim_template<4U, 4U, 4U>' requested here}}
+
+int none_const_int = 4;
+
+//NS-error@+1 {{'cluster_dims' is not supported for this GPU architecture}}
+__global__ void __cluster_dims__(none_const_int, 2, 4) test_non_constant_0() {} // common-error {{'cluster_dims' attribute requires parameter 0 to be an integer constant}}
+
+//NS-error@+1 {{'cluster_dims' is not supported for this GPU architecture}}
+__global__ void __cluster_dims__(8, none_const_int / 2, 4) test_non_constant_1() {} // common-error {{'cluster_dims' attribute requires parameter 1 to be an integer constant}}
+
+//NS-error@+1 {{'cluster_dims' is not supported for this GPU architecture}}
+__global__ void __cluster_dims__(8, 2, none_const_int / 4) test_non_constant_2() {} // common-error {{'cluster_dims' attribute requires parameter 2 to be an integer constant}}
+
+//NS-error@+1 {{'no_cluster' is not supported for this GPU architecture}}
+__global__ void __no_cluster__ test_no_cluster() {}
+
+//NS-error@+2 {{'no_cluster' is not supported for this GPU architecture}}
+//NS-error@+1 {{'cluster_dims' is not supported for this GPU architecture}}
+__global__ void __no_cluster__ __cluster_dims__(2,2,2) test_have_both() {} // common-error {{'cluster_dims' and 'no_cluster' attributes are not compatible}} common-note {{conflicting attribute is here}}
+
+template <int... args>
+__cluster_dims__(args) void test_template_variadic_args(void) {} // all-error {{expression contains unexpanded parameter pack 'args'}}
+
+template <int... args>
+__cluster_dims__(1, args) void test_template_variadic_args_2(void) {} // all-error {{expression contains unexpanded parameter pack 'args'}}
diff --git a/clang/test/SemaCXX/cxx2c-template-template-param.cpp b/clang/test/SemaCXX/cxx2c-template-template-param.cpp
index 4ad3fd9..704df31 100644
--- a/clang/test/SemaCXX/cxx2c-template-template-param.cpp
+++ b/clang/test/SemaCXX/cxx2c-template-template-param.cpp
@@ -350,3 +350,87 @@ template <A<concept missing<int>> T> // expected-error {{expected expression}} \
// expected-error {{expected unqualified-id}}
auto f();
}
+
+namespace concept_arg_normalization {
+
+template <typename T,
+ template <typename...> concept C1>
+concept one = (C1<T>); // #concept-arg-one
+
+template <typename T>
+concept A = true; // #concept-arg-A
+
+template <typename T>
+concept BetterA = A<T> && true;
+
+template <typename T>
+concept B = true; // #concept-arg-B
+
+template <typename T>
+concept False = false; // #concept-arg-False
+
+template <typename T>
+requires one<T, A>
+void f1(T){} // #concept-arg-f1-1
+
+template <typename T>
+requires one<T, B>
+void f1(T){} // #concept-arg-f1-2
+
+template <typename T>
+requires one<T, A>
+void f2(T){}
+
+template <typename T>
+requires one<T, BetterA>
+void f2(T){}
+
+
+template <template <typename> concept CT>
+requires one<int, A>
+void f3(){} // #concept-arg-f3-1
+
+template <template <typename> concept CT>
+requires one<int, CT>
+void f3(){} // #concept-arg-f3-2
+
+template <typename T>
+requires one<T, False> void f4(T){} // #concept-arg-f4
+
+
+void test() {
+ f1(0);
+ // expected-error@-1 {{call to 'f1' is ambiguous}}
+ // expected-note@#concept-arg-f1-1{{candidate function [with T = int]}}
+ // expected-note@#concept-arg-f1-2{{candidate function [with T = int]}}
+ // expected-note@#concept-arg-A {{similar constraint expressions not considered equivalent}}
+ // expected-note@#concept-arg-B {{similar constraint expression here}}
+ f2(0);
+
+ f3<BetterA>();
+ // expected-error@-1 {{call to 'f3' is ambiguous}}
+ // expected-note@#concept-arg-f3-1 {{candidate function [with CT = concept_arg_normalization::BetterA]}}
+ // expected-note@#concept-arg-f3-2 {{candidate function [with CT = concept_arg_normalization::BetterA]}}
+
+  static_assert(one<int, A>);
+  static_assert(one<int, False>);
+  // expected-error@-1 {{static assertion failed}} \
+  // expected-note@-1 {{because 'one<int, False>' evaluated to false}}
+  // expected-note@#concept-arg-one {{because 'int' does not satisfy 'False'}}
+  // expected-note@#concept-arg-False {{because 'false' evaluated to false}}
+
+  f4(0);
+  // expected-error@-1 {{no matching function for call to 'f4'}}
+  // expected-note@#concept-arg-f4 {{candidate template ignored: constraints not satisfied [with T = int]}}
+  // expected-note@#concept-arg-f4 {{because 'one<int, False>'}}
+  // expected-note@#concept-arg-one {{because 'int' does not satisfy 'False'}}
+  // expected-note@#concept-arg-False {{because 'false' evaluated to false}}
+
+}
+
+template <typename T, template <typename...> concept C1>
+concept TestBinary = T::a || C1<T>;
+static_assert(TestBinary<int, A>);
+
+
+}
diff --git a/clang/test/SemaHLSL/Language/TemplateOutArg.hlsl b/clang/test/SemaHLSL/Language/TemplateOutArg.hlsl
index 2d6252c..3365dbe 100644
--- a/clang/test/SemaHLSL/Language/TemplateOutArg.hlsl
+++ b/clang/test/SemaHLSL/Language/TemplateOutArg.hlsl
@@ -195,6 +195,81 @@ T buzz(int X, T Y) {
return X + Y;
}
+// Case 4: Verify that the parameter modifier attributes are instantiated
+// for both templated and non-templated arguments, and that the non-templated
+// out argument type is not modified by the template instantiation.
+
+// CHECK-LABEL: FunctionTemplateDecl {{.*}} fizz_two
+
+// Check the pattern decl.
+// CHECK: FunctionDecl {{.*}} fizz_two 'void (inout T, out int)'
+// CHECK-NEXT: ParmVarDecl {{.*}} referenced V 'T'
+// CHECK-NEXT: HLSLParamModifierAttr {{.*}} inout
+// CHECK-NEXT: ParmVarDecl {{.*}} referenced I 'int &__restrict'
+// CHECK-NEXT: HLSLParamModifierAttr {{.*}} out
+
+// Check the 3 instantiations (int, float, & double).
+
+// CHECK-LABEL: FunctionDecl {{.*}} used fizz_two 'void (inout int, out int)' implicit_instantiation
+// CHECK: ParmVarDecl {{.*}} used V 'int &__restrict'
+// CHECK-NEXT: HLSLParamModifierAttr {{.*}} inout
+// CHECK: ParmVarDecl {{.*}} used I 'int &__restrict'
+// CHECK-NEXT: HLSLParamModifierAttr {{.*}} out
+
+// CHECK-LABEL: FunctionDecl {{.*}} used fizz_two 'void (inout float, out int)' implicit_instantiation
+// CHECK: ParmVarDecl {{.*}} used V 'float &__restrict'
+// CHECK-NEXT: HLSLParamModifierAttr {{.*}} inout
+// CHECK: ParmVarDecl {{.*}} used I 'int &__restrict'
+// CHECK-NEXT: HLSLParamModifierAttr {{.*}} out
+
+// CHECK-LABEL: FunctionDecl {{.*}} used fizz_two 'void (inout double, out int)' implicit_instantiation
+// CHECK: ParmVarDecl {{.*}} used V 'double &__restrict'
+// CHECK-NEXT: HLSLParamModifierAttr {{.*}} inout
+// CHECK: ParmVarDecl {{.*}} used I 'int &__restrict'
+// CHECK-NEXT: HLSLParamModifierAttr {{.*}} out
+template <typename T>
+void fizz_two(inout T V, out int I) {
+ V += 2;
+ I = V;
+}
+
+// Case 5: Verify that `in` parameter modifier attributes are instantiated
+// for both templated and non-templated arguments, and that argument types are
+// not modified.
+
+// CHECK-LABEL: FunctionTemplateDecl {{.*}} buzz_two
+
+// Check the pattern decl.
+// CHECK: FunctionDecl {{.*}} buzz_two 'int (T, int)'
+// CHECK-NEXT: ParmVarDecl {{.*}} referenced A 'T'
+// CHECK-NEXT: HLSLParamModifierAttr {{.*}} in
+// CHECK-NEXT: ParmVarDecl {{.*}} referenced B 'int'
+// CHECK-NEXT: HLSLParamModifierAttr {{.*}} in
+
+// Check the 3 instantiations (int, float, & double).
+
+// CHECK-LABEL: FunctionDecl {{.*}} used buzz_two 'int (int, int)' implicit_instantiation
+// CHECK: ParmVarDecl {{.*}} used A 'int'
+// CHECK-NEXT: HLSLParamModifierAttr {{.*}} in
+// CHECK: ParmVarDecl {{.*}} used B 'int'
+// CHECK-NEXT: HLSLParamModifierAttr {{.*}} in
+
+// CHECK-LABEL: FunctionDecl {{.*}} used buzz_two 'int (float, int)' implicit_instantiation
+// CHECK: ParmVarDecl {{.*}} used A 'float'
+// CHECK-NEXT: HLSLParamModifierAttr {{.*}} in
+// CHECK: ParmVarDecl {{.*}} used B 'int'
+// CHECK-NEXT: HLSLParamModifierAttr {{.*}} in
+
+// CHECK-LABEL: FunctionDecl {{.*}} used buzz_two 'int (double, int)' implicit_instantiation
+// CHECK: ParmVarDecl {{.*}} used A 'double'
+// CHECK-NEXT: HLSLParamModifierAttr {{.*}} in
+// CHECK: ParmVarDecl {{.*}} used B 'int'
+// CHECK-NEXT: HLSLParamModifierAttr {{.*}} in
+template <typename T>
+int buzz_two(in T A, in int B) {
+ return A + B;
+}
+
export void caller() {
int X = 2;
float Y = 3.3;
@@ -211,4 +286,12 @@ export void caller() {
X = buzz(X, X);
Y = buzz(X, Y);
Z = buzz(X, Z);
+
+ fizz_two(X, X);
+ fizz_two(Y, X);
+ fizz_two(Z, X);
+
+ X = buzz_two(X, X);
+ X = buzz_two(Y, X);
+ X = buzz_two(Z, X);
}
diff --git a/clang/test/SemaHLSL/Operators/logical-not.hlsl b/clang/test/SemaHLSL/Operators/logical-not.hlsl
new file mode 100644
index 0000000..d06ca39
--- /dev/null
+++ b/clang/test/SemaHLSL/Operators/logical-not.hlsl
@@ -0,0 +1,53 @@
+// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -fnative-half-type -ast-dump -ast-dump-filter=case | FileCheck %s
+
+// CHECK-LABEL: FunctionDecl {{.*}} used case1 'uint32_t2 (uint32_t2)'
+// CHECK-NEXT: ParmVarDecl {{.*}} used b 'uint32_t2':'vector<uint32_t, 2>'
+// CHECK-NEXT: CompoundStmt
+// CHECK-NEXT: ReturnStmt
+// CHECK-NEXT: ImplicitCastExpr {{.*}} 'vector<uint32_t, 2>' <IntegralCast>
+// CHECK-NEXT: UnaryOperator {{.*}} 'vector<bool, 2>' prefix '!' cannot overflow
+// CHECK-NEXT: ImplicitCastExpr {{.*}} 'vector<bool, 2>' <IntegralToBoolean>
+// CHECK-NEXT: ImplicitCastExpr {{.*}} 'uint32_t2':'vector<uint32_t, 2>' <LValueToRValue>
+// CHECK-NEXT: DeclRefExpr {{.*}} 'uint32_t2':'vector<uint32_t, 2>' lvalue ParmVar {{.*}} 'b' 'uint32_t2':'vector<uint32_t, 2>'
+export uint32_t2 case1(uint32_t2 b) {
+ return !b;
+}
+
+// CHECK-LABEL: FunctionDecl {{.*}} used case2 'int32_t3 (int32_t3)'
+// CHECK-NEXT: ParmVarDecl {{.*}} used b 'int32_t3':'vector<int32_t, 3>'
+// CHECK-NEXT: CompoundStmt
+// CHECK-NEXT: ReturnStmt
+// CHECK-NEXT: ImplicitCastExpr {{.*}} 'vector<int32_t, 3>' <IntegralCast>
+// CHECK-NEXT: UnaryOperator {{.*}} 'vector<bool, 3>' prefix '!' cannot overflow
+// CHECK-NEXT: ImplicitCastExpr {{.*}} 'vector<bool, 3>' <IntegralToBoolean>
+// CHECK-NEXT: ImplicitCastExpr {{.*}} 'int32_t3':'vector<int32_t, 3>' <LValueToRValue>
+// CHECK-NEXT: DeclRefExpr {{.*}} 'int32_t3':'vector<int32_t, 3>' lvalue ParmVar {{.*}} 'b' 'int32_t3':'vector<int32_t, 3>'
+export int32_t3 case2(int32_t3 b) {
+ return !b;
+}
+
+// CHECK-LABEL: FunctionDecl {{.*}} used case3 'float16_t (float16_t)'
+// CHECK-NEXT: ParmVarDecl {{.*}} used b 'float16_t':'half'
+// CHECK-NEXT: CompoundStmt
+// CHECK-NEXT: ReturnStmt
+// CHECK-NEXT: ImplicitCastExpr {{.*}} 'float16_t':'half' <IntegralToFloating>
+// CHECK-NEXT: UnaryOperator {{.*}} 'bool' prefix '!' cannot overflow
+// CHECK-NEXT: ImplicitCastExpr {{.*}} 'bool' <FloatingToBoolean>
+// CHECK-NEXT: ImplicitCastExpr {{.*}} 'float16_t':'half' <LValueToRValue>
+// CHECK-NEXT: DeclRefExpr {{.*}} 'float16_t':'half' lvalue ParmVar {{.*}} 'b' 'float16_t':'half'
+export float16_t case3(float16_t b) {
+ return !b;
+}
+
+// CHECK-LABEL: FunctionDecl {{.*}} used case4 'float4 (float4)'
+// CHECK-NEXT: ParmVarDecl {{.*}} used b 'float4':'vector<float, 4>'
+// CHECK-NEXT: CompoundStmt
+// CHECK-NEXT: ReturnStmt
+// CHECK-NEXT: ImplicitCastExpr {{.*}} 'vector<float, 4>' <IntegralToFloating>
+// CHECK-NEXT: UnaryOperator {{.*}} 'vector<bool, 4>' prefix '!' cannot overflow
+// CHECK-NEXT: ImplicitCastExpr {{.*}} 'vector<bool, 4>' <FloatingToBoolean>
+// CHECK-NEXT: ImplicitCastExpr {{.*}} 'float4':'vector<float, 4>' <LValueToRValue>
+// CHECK-NEXT: DeclRefExpr {{.*}} 'float4':'vector<float, 4>' lvalue ParmVar {{.*}} 'b' 'float4':'vector<float, 4>'
+export float4 case4(float4 b) {
+ return !b;
+}
diff --git a/clang/test/SemaTemplate/concepts.cpp b/clang/test/SemaTemplate/concepts.cpp
index aaa20f6..a54bc02 100644
--- a/clang/test/SemaTemplate/concepts.cpp
+++ b/clang/test/SemaTemplate/concepts.cpp
@@ -1514,6 +1514,31 @@ static_assert( requires {{ &f } -> C;} ); // expected-error {{reference to overl
}
+namespace GH162092 {
+
+template <typename T>
+struct vector;
+
+template <typename T, typename U>
+concept C = __is_same_as(T, U);
+
+template<class T, auto Cpt>
+concept generic_range_value = requires {
+ Cpt.template operator()<int>();
+};
+
+
+template<generic_range_value<[]<
+ C<int>
+ >() {}> T>
+void x() {}
+
+void foo() {
+ x<vector<int>>();
+}
+
+}
+
namespace GH162770 {
enum e {};
template<e> struct s {};
diff --git a/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp b/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp
index 4d5b956..bfeca17 100644
--- a/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp
+++ b/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp
@@ -396,8 +396,8 @@ fatbinary(ArrayRef<std::pair<StringRef, StringRef>> InputFiles,
CmdArgs.push_back("--create");
CmdArgs.push_back(*TempFileOrErr);
for (const auto &[File, Arch] : InputFiles)
- CmdArgs.push_back(
- Args.MakeArgString("--image=profile=" + Arch + ",file=" + File));
+ CmdArgs.push_back(Args.MakeArgString(
+ "--image3=kind=elf,sm=" + Arch.drop_front(3) + ",file=" + File));
if (Error Err = executeCommands(*FatBinaryPath, CmdArgs))
return std::move(Err);
diff --git a/clang/unittests/Format/AlignBracketsTest.cpp b/clang/unittests/Format/AlignBracketsTest.cpp
index c4380ae..ea8db51 100644
--- a/clang/unittests/Format/AlignBracketsTest.cpp
+++ b/clang/unittests/Format/AlignBracketsTest.cpp
@@ -778,6 +778,19 @@ TEST_F(AlignBracketsTest, ParenthesesAndOperandAlignment) {
Style);
}
+TEST_F(AlignBracketsTest, BlockIndentAndNamespace) {
+ auto Style = getLLVMStyleWithColumns(120);
+ Style.AllowShortNamespacesOnASingleLine = true;
+ Style.AlignAfterOpenBracket = FormatStyle::BAS_BlockIndent;
+
+ verifyNoCrash(
+ "namespace {\n"
+ "void xxxxxxxxxxxxxxxxxxxxx(nnnnn::TTTTTTTTTTTTT const *mmmm,\n"
+ " YYYYYYYYYYYYYYYYY &yyyyyyyyyyyyyy);\n"
+ "} //",
+ Style);
+}
+
} // namespace
} // namespace test
} // namespace format
diff --git a/clang/unittests/Format/ConfigParseTest.cpp b/clang/unittests/Format/ConfigParseTest.cpp
index 6111e86..6488e38 100644
--- a/clang/unittests/Format/ConfigParseTest.cpp
+++ b/clang/unittests/Format/ConfigParseTest.cpp
@@ -176,7 +176,6 @@ TEST(ConfigParseTest, ParsesConfigurationBools) {
CHECK_PARSE_BOOL(BreakBeforeTernaryOperators);
CHECK_PARSE_BOOL(BreakStringLiterals);
CHECK_PARSE_BOOL(CompactNamespaces);
- CHECK_PARSE_BOOL(Cpp11BracedListStyle);
CHECK_PARSE_BOOL(DerivePointerAlignment);
CHECK_PARSE_BOOL_FIELD(DerivePointerAlignment, "DerivePointerBinding");
CHECK_PARSE_BOOL(DisableFormat);
@@ -1139,6 +1138,18 @@ TEST(ConfigParseTest, ParsesConfiguration) {
FormatStyle::SDS_Leave);
CHECK_PARSE("SeparateDefinitionBlocks: Never", SeparateDefinitionBlocks,
FormatStyle::SDS_Never);
+
+ CHECK_PARSE("Cpp11BracedListStyle: Block", Cpp11BracedListStyle,
+ FormatStyle::BLS_Block);
+ CHECK_PARSE("Cpp11BracedListStyle: FunctionCall", Cpp11BracedListStyle,
+ FormatStyle::BLS_FunctionCall);
+ CHECK_PARSE("Cpp11BracedListStyle: AlignFirstComment", Cpp11BracedListStyle,
+ FormatStyle::BLS_AlignFirstComment);
+ // For backward compatibility:
+ CHECK_PARSE("Cpp11BracedListStyle: false", Cpp11BracedListStyle,
+ FormatStyle::BLS_Block);
+ CHECK_PARSE("Cpp11BracedListStyle: true", Cpp11BracedListStyle,
+ FormatStyle::BLS_AlignFirstComment);
}
TEST(ConfigParseTest, ParsesConfigurationWithLanguages) {
@@ -1264,6 +1275,13 @@ TEST(ConfigParseTest, ParsesConfigurationWithLanguages) {
IndentWidth, 56u);
}
+TEST(ConfigParseTest, AllowCommentOnlyConfigFile) {
+ FormatStyle Style = {};
+ Style.Language = FormatStyle::LK_Cpp;
+ EXPECT_EQ(parseConfiguration("#Language: C", &Style), ParseError::Success);
+ EXPECT_EQ(Style.Language, FormatStyle::LK_Cpp);
+}
+
TEST(ConfigParseTest, AllowCppForC) {
FormatStyle Style = {};
Style.Language = FormatStyle::LK_C;
diff --git a/clang/unittests/Format/FormatTest.cpp b/clang/unittests/Format/FormatTest.cpp
index b9ad930..0fb8139 100644
--- a/clang/unittests/Format/FormatTest.cpp
+++ b/clang/unittests/Format/FormatTest.cpp
@@ -14363,7 +14363,7 @@ TEST_F(FormatTest, LayoutCxx11BraceInitializers) {
BreakBeforeLambdaBody);
FormatStyle ExtraSpaces = getLLVMStyle();
- ExtraSpaces.Cpp11BracedListStyle = false;
+ ExtraSpaces.Cpp11BracedListStyle = FormatStyle::BLS_Block;
ExtraSpaces.ColumnLimit = 75;
verifyFormat("vector<int> x{ 1, 2, 3, 4 };", ExtraSpaces);
verifyFormat("vector<T> x{ {}, {}, {}, {} };", ExtraSpaces);
@@ -20346,7 +20346,7 @@ TEST_F(FormatTest, AlignConsecutiveDeclarations) {
" return 0;\n"
"}()};",
BracedAlign);
- BracedAlign.Cpp11BracedListStyle = false;
+ BracedAlign.Cpp11BracedListStyle = FormatStyle::BLS_Block;
verifyFormat("const auto result{ []() {\n"
" const auto something = 1;\n"
" return 2;\n"
@@ -21953,14 +21953,14 @@ TEST_F(FormatTest, CatchAlignArrayOfStructuresRightAlignment) {
"});",
Style);
- Style.Cpp11BracedListStyle = false;
+ Style.Cpp11BracedListStyle = FormatStyle::BLS_Block;
verifyFormat("struct test demo[] = {\n"
" { 56, 23, \"hello\" },\n"
" { -1, 93463, \"world\" },\n"
" { 7, 5, \"!!\" }\n"
"};",
Style);
- Style.Cpp11BracedListStyle = true;
+ Style.Cpp11BracedListStyle = FormatStyle::BLS_AlignFirstComment;
Style.ColumnLimit = 0;
verifyFormat(
@@ -22220,14 +22220,14 @@ TEST_F(FormatTest, CatchAlignArrayOfStructuresLeftAlignment) {
" };",
Style);
- Style.Cpp11BracedListStyle = false;
+ Style.Cpp11BracedListStyle = FormatStyle::BLS_Block;
verifyFormat("struct test demo[] = {\n"
" { 56, 23, \"hello\" },\n"
" { -1, 93463, \"world\" },\n"
" { 7, 5, \"!!\" }\n"
"};",
Style);
- Style.Cpp11BracedListStyle = true;
+ Style.Cpp11BracedListStyle = FormatStyle::BLS_AlignFirstComment;
Style.ColumnLimit = 0;
verifyFormat(
diff --git a/clang/unittests/Format/FormatTestCSharp.cpp b/clang/unittests/Format/FormatTestCSharp.cpp
index ea85ed6..d7fb15d 100644
--- a/clang/unittests/Format/FormatTestCSharp.cpp
+++ b/clang/unittests/Format/FormatTestCSharp.cpp
@@ -1194,7 +1194,7 @@ TEST_F(FormatTestCSharp, CSharpSpaces) {
Style.SpaceBeforeSquareBrackets = false;
Style.SpacesInSquareBrackets = false;
Style.SpaceBeforeCpp11BracedList = true;
- Style.Cpp11BracedListStyle = false;
+ Style.Cpp11BracedListStyle = FormatStyle::BLS_Block;
Style.SpacesInContainerLiterals = false;
Style.SpaceAfterCStyleCast = false;
diff --git a/clang/unittests/Format/FormatTestComments.cpp b/clang/unittests/Format/FormatTestComments.cpp
index 69026bc..fc80bf4 100644
--- a/clang/unittests/Format/FormatTestComments.cpp
+++ b/clang/unittests/Format/FormatTestComments.cpp
@@ -4699,6 +4699,58 @@ TEST_F(FormatTestComments, SplitCommentIntroducers) {
getLLVMStyleWithColumns(10)));
}
+TEST_F(FormatTestComments, LineCommentsOnStartOfFunctionCall) {
+ auto Style = getLLVMStyle();
+
+ EXPECT_EQ(Style.Cpp11BracedListStyle, FormatStyle::BLS_AlignFirstComment);
+ verifyFormat("Type name{// Comment\n"
+ " value};",
+ Style);
+
+ Style.Cpp11BracedListStyle = FormatStyle::BLS_Block;
+ verifyFormat("Type name{ // Comment\n"
+ " value\n"
+ "};",
+ Style);
+
+ Style.Cpp11BracedListStyle = FormatStyle::BLS_FunctionCall;
+ verifyFormat("Type name{ // Comment\n"
+ " value};",
+ Style);
+
+ verifyFormat("T foo( // Comment\n"
+ " arg);",
+ Style);
+
+ verifyFormat("T bar{ // Comment\n"
+ " arg};",
+ Style);
+
+ verifyFormat("T baz({ // Comment\n"
+ " arg});",
+ Style);
+
+ verifyFormat("T baz{{ // Comment\n"
+ " arg}};",
+ Style);
+
+ verifyFormat("T b0z(f( // Comment\n"
+ " arg));",
+ Style);
+
+ verifyFormat("T b0z(F{ // Comment\n"
+ " arg});",
+ Style);
+
+ verifyFormat("func( // Comment\n"
+ " arg);",
+ Style);
+
+ verifyFormat("func({ // Comment\n"
+ " arg});",
+ Style);
+}
+
} // end namespace
} // namespace test
} // end namespace format
diff --git a/clang/unittests/Format/FormatTestJava.cpp b/clang/unittests/Format/FormatTestJava.cpp
index 1275564..1416614b 100644
--- a/clang/unittests/Format/FormatTestJava.cpp
+++ b/clang/unittests/Format/FormatTestJava.cpp
@@ -236,7 +236,7 @@ TEST_F(FormatTestJava, ArrayInitializers) {
"};");
FormatStyle Style = getStyleWithColumns(65);
- Style.Cpp11BracedListStyle = false;
+ Style.Cpp11BracedListStyle = FormatStyle::BLS_Block;
verifyFormat(
"expected = new int[] { 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,\n"
" 100, 100, 100, 100, 100, 100, 100, 100, 100, 100 };",
diff --git a/clang/unittests/Format/FormatTestTextProto.cpp b/clang/unittests/Format/FormatTestTextProto.cpp
index fd65c9a..6cddb838 100644
--- a/clang/unittests/Format/FormatTestTextProto.cpp
+++ b/clang/unittests/Format/FormatTestTextProto.cpp
@@ -514,7 +514,7 @@ TEST_F(FormatTestTextProto, FormatsRepeatedListInitializers) {
"key: value");
auto Style = getDefaultStyle();
- Style.Cpp11BracedListStyle = true;
+ Style.Cpp11BracedListStyle = FormatStyle::BLS_AlignFirstComment;
verifyFormat("keys: [1]", Style);
}
diff --git a/clang/unittests/Format/FormatTestVerilog.cpp b/clang/unittests/Format/FormatTestVerilog.cpp
index 5c50ae6..63e2cadf 100644
--- a/clang/unittests/Format/FormatTestVerilog.cpp
+++ b/clang/unittests/Format/FormatTestVerilog.cpp
@@ -1287,7 +1287,7 @@ TEST_F(FormatTestVerilog, StringLiteral) {
getStyleWithColumns(getDefaultStyle(), 32));
// Space around braces should be correct.
auto Style = getStyleWithColumns(getDefaultStyle(), 24);
- Style.Cpp11BracedListStyle = false;
+ Style.Cpp11BracedListStyle = FormatStyle::BLS_Block;
verifyFormat(R"(x({ "xxxxxxxxxxxxxxxx ",
"xxxx" });)",
R"(x("xxxxxxxxxxxxxxxx xxxx");)", Style);
diff --git a/clang/unittests/Format/TokenAnnotatorTest.cpp b/clang/unittests/Format/TokenAnnotatorTest.cpp
index 1152466..1002515 100644
--- a/clang/unittests/Format/TokenAnnotatorTest.cpp
+++ b/clang/unittests/Format/TokenAnnotatorTest.cpp
@@ -1129,6 +1129,11 @@ TEST_F(TokenAnnotatorTest, UnderstandsOverloadedOperators) {
ASSERT_EQ(Tokens.size(), 7u) << Tokens;
// Not TT_FunctionDeclarationName.
EXPECT_TOKEN(Tokens[3], tok::kw_operator, TT_Unknown);
+
+ Tokens = annotate("SomeAPI::operator()();");
+ ASSERT_EQ(Tokens.size(), 9u) << Tokens;
+ // Not TT_FunctionDeclarationName.
+ EXPECT_TOKEN(Tokens[2], tok::kw_operator, TT_Unknown);
}
TEST_F(TokenAnnotatorTest, OverloadedOperatorInTemplate) {
diff --git a/clang/unittests/StaticAnalyzer/RangeSetTest.cpp b/clang/unittests/StaticAnalyzer/RangeSetTest.cpp
index 9e36aab..a8c7626 100644
--- a/clang/unittests/StaticAnalyzer/RangeSetTest.cpp
+++ b/clang/unittests/StaticAnalyzer/RangeSetTest.cpp
@@ -27,21 +27,21 @@ template <class RangeOrSet> static std::string toString(const RangeOrSet &Obj) {
Obj.dump(SS);
return ObjRepresentation;
}
-LLVM_ATTRIBUTE_UNUSED static std::string toString(const llvm::APSInt &Point) {
+[[maybe_unused]] static std::string toString(const llvm::APSInt &Point) {
return toString(Point, 10);
}
// We need it here for better fail diagnostics from gtest.
-LLVM_ATTRIBUTE_UNUSED static std::ostream &operator<<(std::ostream &OS,
- const RangeSet &Set) {
+[[maybe_unused]] static std::ostream &operator<<(std::ostream &OS,
+ const RangeSet &Set) {
return OS << toString(Set);
}
// We need it here for better fail diagnostics from gtest.
-LLVM_ATTRIBUTE_UNUSED static std::ostream &operator<<(std::ostream &OS,
- const Range &R) {
+[[maybe_unused]] static std::ostream &operator<<(std::ostream &OS,
+ const Range &R) {
return OS << toString(R);
}
-LLVM_ATTRIBUTE_UNUSED static std::ostream &operator<<(std::ostream &OS,
- APSIntType Ty) {
+[[maybe_unused]] static std::ostream &operator<<(std::ostream &OS,
+ APSIntType Ty) {
return OS << (Ty.isUnsigned() ? "u" : "s") << Ty.getBitWidth();
}
diff --git a/clang/unittests/StaticAnalyzer/SValTest.cpp b/clang/unittests/StaticAnalyzer/SValTest.cpp
index db4b01b..f96456a 100644
--- a/clang/unittests/StaticAnalyzer/SValTest.cpp
+++ b/clang/unittests/StaticAnalyzer/SValTest.cpp
@@ -34,13 +34,12 @@ namespace clang {
// getType() tests include whole bunch of type comparisons,
// so when something is wrong, it's good to have gtest telling us
// what are those types.
-LLVM_ATTRIBUTE_UNUSED std::ostream &operator<<(std::ostream &OS,
- const QualType &T) {
+[[maybe_unused]] std::ostream &operator<<(std::ostream &OS, const QualType &T) {
return OS << T.getAsString();
}
-LLVM_ATTRIBUTE_UNUSED std::ostream &operator<<(std::ostream &OS,
- const CanQualType &T) {
+[[maybe_unused]] std::ostream &operator<<(std::ostream &OS,
+ const CanQualType &T) {
return OS << QualType{T};
}
diff --git a/clang/utils/TableGen/MveEmitter.cpp b/clang/utils/TableGen/MveEmitter.cpp
index a003b5e..f55a5f5 100644
--- a/clang/utils/TableGen/MveEmitter.cpp
+++ b/clang/utils/TableGen/MveEmitter.cpp
@@ -1684,7 +1684,8 @@ void EmitterBase::EmitBuiltinCG(raw_ostream &OS) {
OS << " case ARM::BI__builtin_arm_" << OI.Int->builtinExtension()
<< "_" << OI.Name << ":\n";
for (size_t i = 0, e = MG.ParamTypes.size(); i < e; ++i)
- OS << " Param" << utostr(i) << " = " << OI.ParamValues[i] << ";\n";
+ OS << " Param" << utostr(i) << " = static_cast<"
+ << MG.ParamTypes[i] << ">(" << OI.ParamValues[i] << ");\n";
OS << " break;\n";
}
OS << " }\n";
diff --git a/compiler-rt/test/asan/TestCases/Windows/basic_exception_handling.cpp b/compiler-rt/test/asan/TestCases/Windows/basic_exception_handling.cpp
new file mode 100644
index 0000000..6f02814
--- /dev/null
+++ b/compiler-rt/test/asan/TestCases/Windows/basic_exception_handling.cpp
@@ -0,0 +1,33 @@
+// RUN: %clangxx_asan %s -o %t
+// RUN: %run %t | FileCheck %s
+
+// This test verifies that declaring a parameter in a catch block does not
+// produce a false-positive ASan error on Windows.
+
+// This code is based on the repro in https://github.com/google/sanitizers/issues/749
+#include <cstdio>
+#include <exception>
+
+void throwInFunction() { throw std::exception("test2"); }
+
+int main() {
+ // case 1: direct throw
+ try {
+ throw std::exception("test1");
+ } catch (const std::exception &ex) {
+ puts(ex.what());
+ // CHECK: test1
+ }
+
+ // case 2: throw in function
+ try {
+ throwInFunction();
+ } catch (const std::exception &ex) {
+ puts(ex.what());
+ // CHECK: test2
+ }
+
+ printf("Success!\n");
+ // CHECK: Success!
+ return 0;
+}
diff --git a/flang/include/flang/Optimizer/Dialect/FIRType.h b/flang/include/flang/Optimizer/Dialect/FIRType.h
index 6188c446..ceee24a 100644
--- a/flang/include/flang/Optimizer/Dialect/FIRType.h
+++ b/flang/include/flang/Optimizer/Dialect/FIRType.h
@@ -389,6 +389,9 @@ bool isPolymorphicType(mlir::Type ty);
/// value.
bool isUnlimitedPolymorphicType(mlir::Type ty);
+/// Return true if CLASS(*)
+bool isClassStarType(mlir::Type ty);
+
/// Return true iff `ty` is the type of an assumed type. In FIR,
/// assumed types are of the form `[fir.ref|ptr|heap]fir.box<[fir.array]none>`,
/// or `fir.ref|ptr|heap<[fir.array]none>`.
diff --git a/flang/include/flang/Optimizer/OpenACC/Support/FIROpenACCTypeInterfaces.h b/flang/include/flang/Optimizer/OpenACC/Support/FIROpenACCTypeInterfaces.h
index 408f039..4817ed9 100644
--- a/flang/include/flang/Optimizer/OpenACC/Support/FIROpenACCTypeInterfaces.h
+++ b/flang/include/flang/Optimizer/OpenACC/Support/FIROpenACCTypeInterfaces.h
@@ -29,6 +29,20 @@ struct OpenACCPointerLikeModel
getPointeeTypeCategory(mlir::Type pointer,
mlir::TypedValue<mlir::acc::PointerLikeType> varPtr,
mlir::Type varType) const;
+
+ mlir::Value genAllocate(mlir::Type pointer, mlir::OpBuilder &builder,
+ mlir::Location loc, llvm::StringRef varName,
+ mlir::Type varType, mlir::Value originalVar,
+ bool &needsFree) const;
+
+ bool genFree(mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::TypedValue<mlir::acc::PointerLikeType> varToFree,
+ mlir::Value allocRes, mlir::Type varType) const;
+
+ bool genCopy(mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::TypedValue<mlir::acc::PointerLikeType> destination,
+ mlir::TypedValue<mlir::acc::PointerLikeType> source,
+ mlir::Type varType) const;
};
template <typename T>
diff --git a/flang/include/flang/Optimizer/OpenMP/Passes.td b/flang/include/flang/Optimizer/OpenMP/Passes.td
index e2f0920..bfbaa5f 100644
--- a/flang/include/flang/Optimizer/OpenMP/Passes.td
+++ b/flang/include/flang/Optimizer/OpenMP/Passes.td
@@ -93,6 +93,10 @@ def LowerWorkshare : Pass<"lower-workshare", "::mlir::ModuleOp"> {
let summary = "Lower workshare construct";
}
+def LowerWorkdistribute : Pass<"lower-workdistribute", "::mlir::ModuleOp"> {
+ let summary = "Lower workdistribute construct";
+}
+
def GenericLoopConversionPass
: Pass<"omp-generic-loop-conversion", "mlir::func::FuncOp"> {
let summary = "Converts OpenMP generic `omp.loop` to semantically "
diff --git a/flang/lib/Lower/Bridge.cpp b/flang/lib/Lower/Bridge.cpp
index 525fb0e..0595ca0 100644
--- a/flang/lib/Lower/Bridge.cpp
+++ b/flang/lib/Lower/Bridge.cpp
@@ -6076,7 +6076,7 @@ private:
if (resTy != wrappedSymTy) {
// check size of the pointed to type so we can't overflow by writing
// double precision to a single precision allocation, etc
- LLVM_ATTRIBUTE_UNUSED auto getBitWidth = [this](mlir::Type ty) {
+ [[maybe_unused]] auto getBitWidth = [this](mlir::Type ty) {
// 15.6.2.6.3: differering result types should be integer, real,
// complex or logical
if (auto cmplx = mlir::dyn_cast_or_null<mlir::ComplexType>(ty))
diff --git a/flang/lib/Lower/ConvertExpr.cpp b/flang/lib/Lower/ConvertExpr.cpp
index d7f94e1..a46d219 100644
--- a/flang/lib/Lower/ConvertExpr.cpp
+++ b/flang/lib/Lower/ConvertExpr.cpp
@@ -5603,7 +5603,7 @@ private:
return newIters;
};
if (useTripsForSlice) {
- LLVM_ATTRIBUTE_UNUSED auto vectorSubscriptShape =
+ [[maybe_unused]] auto vectorSubscriptShape =
getShape(arrayOperands.back());
auto undef = fir::UndefOp::create(builder, loc, idxTy);
trips.push_back(undef);
diff --git a/flang/lib/Optimizer/Builder/Character.cpp b/flang/lib/Optimizer/Builder/Character.cpp
index a096099..155bc0f 100644
--- a/flang/lib/Optimizer/Builder/Character.cpp
+++ b/flang/lib/Optimizer/Builder/Character.cpp
@@ -92,7 +92,7 @@ getCompileTimeLength(const fir::CharBoxValue &box) {
/// Detect the precondition that the value `str` does not reside in memory. Such
/// values will have a type `!fir.array<...x!fir.char<N>>` or `!fir.char<N>`.
-LLVM_ATTRIBUTE_UNUSED static bool needToMaterialize(mlir::Value str) {
+[[maybe_unused]] static bool needToMaterialize(mlir::Value str) {
return mlir::isa<fir::SequenceType>(str.getType()) ||
fir::isa_char(str.getType());
}
diff --git a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp
index e07baaf..0195178 100644
--- a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp
+++ b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp
@@ -2169,7 +2169,8 @@ IntrinsicLibrary::genElementalCall<IntrinsicLibrary::ExtendedGenerator>(
for (const fir::ExtendedValue &arg : args) {
auto *box = arg.getBoxOf<fir::BoxValue>();
if (!arg.getUnboxed() && !arg.getCharBox() &&
- !(box && fir::isScalarBoxedRecordType(fir::getBase(*box).getType())))
+ !(box && (fir::isScalarBoxedRecordType(fir::getBase(*box).getType()) ||
+ fir::isClassStarType(fir::getBase(*box).getType()))))
fir::emitFatalError(loc, "nonscalar intrinsic argument");
}
if (outline)
diff --git a/flang/lib/Optimizer/Dialect/FIRType.cpp b/flang/lib/Optimizer/Dialect/FIRType.cpp
index 4a9579c..48e1622 100644
--- a/flang/lib/Optimizer/Dialect/FIRType.cpp
+++ b/flang/lib/Optimizer/Dialect/FIRType.cpp
@@ -336,6 +336,17 @@ bool isBoxedRecordType(mlir::Type ty) {
return false;
}
+// CLASS(*)
+bool isClassStarType(mlir::Type ty) {
+ if (auto clTy = mlir::dyn_cast<fir::ClassType>(fir::unwrapRefType(ty))) {
+ if (mlir::isa<mlir::NoneType>(clTy.getEleTy()))
+ return true;
+ mlir::Type innerType = clTy.unwrapInnerType();
+ return innerType && mlir::isa<mlir::NoneType>(innerType);
+ }
+ return false;
+}
+
bool isScalarBoxedRecordType(mlir::Type ty) {
if (auto refTy = fir::dyn_cast_ptrEleTy(ty))
ty = refTy;
@@ -398,12 +409,8 @@ bool isPolymorphicType(mlir::Type ty) {
bool isUnlimitedPolymorphicType(mlir::Type ty) {
// CLASS(*)
- if (auto clTy = mlir::dyn_cast<fir::ClassType>(fir::unwrapRefType(ty))) {
- if (mlir::isa<mlir::NoneType>(clTy.getEleTy()))
- return true;
- mlir::Type innerType = clTy.unwrapInnerType();
- return innerType && mlir::isa<mlir::NoneType>(innerType);
- }
+ if (isClassStarType(ty))
+ return true;
// TYPE(*)
return isAssumedType(ty);
}
diff --git a/flang/lib/Optimizer/HLFIR/Transforms/ScheduleOrderedAssignments.cpp b/flang/lib/Optimizer/HLFIR/Transforms/ScheduleOrderedAssignments.cpp
index a48b7ba..63a5803 100644
--- a/flang/lib/Optimizer/HLFIR/Transforms/ScheduleOrderedAssignments.cpp
+++ b/flang/lib/Optimizer/HLFIR/Transforms/ScheduleOrderedAssignments.cpp
@@ -21,24 +21,27 @@
//===----------------------------------------------------------------------===//
/// Log RAW or WAW conflict.
-static void LLVM_ATTRIBUTE_UNUSED logConflict(llvm::raw_ostream &os,
- mlir::Value writtenOrReadVarA,
- mlir::Value writtenVarB);
+[[maybe_unused]] static void logConflict(llvm::raw_ostream &os,
+ mlir::Value writtenOrReadVarA,
+ mlir::Value writtenVarB);
/// Log when an expression evaluation must be saved.
-static void LLVM_ATTRIBUTE_UNUSED logSaveEvaluation(llvm::raw_ostream &os,
- unsigned runid,
- mlir::Region &yieldRegion,
- bool anyWrite);
+[[maybe_unused]] static void logSaveEvaluation(llvm::raw_ostream &os,
+ unsigned runid,
+ mlir::Region &yieldRegion,
+ bool anyWrite);
/// Log when an assignment is scheduled.
-static void LLVM_ATTRIBUTE_UNUSED logAssignmentEvaluation(
- llvm::raw_ostream &os, unsigned runid, hlfir::RegionAssignOp assign);
+[[maybe_unused]] static void
+logAssignmentEvaluation(llvm::raw_ostream &os, unsigned runid,
+ hlfir::RegionAssignOp assign);
/// Log when starting to schedule an order assignment tree.
-static void LLVM_ATTRIBUTE_UNUSED logStartScheduling(
- llvm::raw_ostream &os, hlfir::OrderedAssignmentTreeOpInterface root);
+[[maybe_unused]] static void
+logStartScheduling(llvm::raw_ostream &os,
+ hlfir::OrderedAssignmentTreeOpInterface root);
/// Log op if effect value is not known.
-static void LLVM_ATTRIBUTE_UNUSED logIfUnkownEffectValue(
- llvm::raw_ostream &os, mlir::MemoryEffects::EffectInstance effect,
- mlir::Operation &op);
+[[maybe_unused]] static void
+logIfUnkownEffectValue(llvm::raw_ostream &os,
+ mlir::MemoryEffects::EffectInstance effect,
+ mlir::Operation &op);
//===----------------------------------------------------------------------===//
// Scheduling Implementation
@@ -701,23 +704,24 @@ static llvm::raw_ostream &printRegionPath(llvm::raw_ostream &os,
return printRegionId(os, yieldRegion);
}
-static void LLVM_ATTRIBUTE_UNUSED logSaveEvaluation(llvm::raw_ostream &os,
- unsigned runid,
- mlir::Region &yieldRegion,
- bool anyWrite) {
+[[maybe_unused]] static void logSaveEvaluation(llvm::raw_ostream &os,
+ unsigned runid,
+ mlir::Region &yieldRegion,
+ bool anyWrite) {
os << "run " << runid << " save " << (anyWrite ? "(w)" : " ") << ": ";
printRegionPath(os, yieldRegion) << "\n";
}
-static void LLVM_ATTRIBUTE_UNUSED logAssignmentEvaluation(
- llvm::raw_ostream &os, unsigned runid, hlfir::RegionAssignOp assign) {
+[[maybe_unused]] static void
+logAssignmentEvaluation(llvm::raw_ostream &os, unsigned runid,
+ hlfir::RegionAssignOp assign) {
os << "run " << runid << " evaluate: ";
printNodePath(os, assign.getOperation()) << "\n";
}
-static void LLVM_ATTRIBUTE_UNUSED logConflict(llvm::raw_ostream &os,
- mlir::Value writtenOrReadVarA,
- mlir::Value writtenVarB) {
+[[maybe_unused]] static void logConflict(llvm::raw_ostream &os,
+ mlir::Value writtenOrReadVarA,
+ mlir::Value writtenVarB) {
auto printIfValue = [&](mlir::Value var) -> llvm::raw_ostream & {
if (!var)
return os << "<unknown>";
@@ -728,8 +732,9 @@ static void LLVM_ATTRIBUTE_UNUSED logConflict(llvm::raw_ostream &os,
printIfValue(writtenVarB) << "\n";
}
-static void LLVM_ATTRIBUTE_UNUSED logStartScheduling(
- llvm::raw_ostream &os, hlfir::OrderedAssignmentTreeOpInterface root) {
+[[maybe_unused]] static void
+logStartScheduling(llvm::raw_ostream &os,
+ hlfir::OrderedAssignmentTreeOpInterface root) {
os << "------------ scheduling ";
printNodePath(os, root.getOperation());
if (auto funcOp = root->getParentOfType<mlir::func::FuncOp>())
@@ -737,9 +742,10 @@ static void LLVM_ATTRIBUTE_UNUSED logStartScheduling(
os << "------------\n";
}
-static void LLVM_ATTRIBUTE_UNUSED logIfUnkownEffectValue(
- llvm::raw_ostream &os, mlir::MemoryEffects::EffectInstance effect,
- mlir::Operation &op) {
+[[maybe_unused]] static void
+logIfUnkownEffectValue(llvm::raw_ostream &os,
+ mlir::MemoryEffects::EffectInstance effect,
+ mlir::Operation &op) {
if (effect.getValue() != nullptr)
return;
os << "unknown effected value (";
diff --git a/flang/lib/Optimizer/OpenACC/Support/FIROpenACCTypeInterfaces.cpp b/flang/lib/Optimizer/OpenACC/Support/FIROpenACCTypeInterfaces.cpp
index 9bf10b5..ed9e41c 100644
--- a/flang/lib/Optimizer/OpenACC/Support/FIROpenACCTypeInterfaces.cpp
+++ b/flang/lib/Optimizer/OpenACC/Support/FIROpenACCTypeInterfaces.cpp
@@ -751,4 +751,245 @@ template bool OpenACCMappableModel<fir::PointerType>::generatePrivateDestroy(
mlir::Type type, mlir::OpBuilder &builder, mlir::Location loc,
mlir::Value privatized) const;
+template <typename Ty>
+mlir::Value OpenACCPointerLikeModel<Ty>::genAllocate(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ llvm::StringRef varName, mlir::Type varType, mlir::Value originalVar,
+ bool &needsFree) const {
+
+ // Unwrap to get the pointee type.
+ mlir::Type pointeeTy = fir::dyn_cast_ptrEleTy(pointer);
+ assert(pointeeTy && "expected pointee type to be extractable");
+
+ // Box types are descriptors that contain both metadata and a pointer to data.
+ // The `genAllocate` API is designed for simple allocations and cannot
+ // properly handle the dual nature of boxes. Using `generatePrivateInit`
+  // instead can allocate both the descriptor and its referenced data. For use
+  // cases that require only empty descriptor storage, this could potentially
+  // be implemented here.
+ if (fir::isa_box_type(pointeeTy))
+ return {};
+
+ // Unlimited polymorphic (class(*)) cannot be handled - size unknown
+ if (fir::isUnlimitedPolymorphicType(pointeeTy))
+ return {};
+
+ // Return null for dynamic size types because the size of the
+ // allocation cannot be determined simply from the type.
+ if (fir::hasDynamicSize(pointeeTy))
+ return {};
+
+  // Use heap allocation for fir.heap and stack allocation for the others
+  // (fir.ref, fir.ptr, fir.llvm_ptr). For fir.ptr, which represents a Fortran
+  // pointer type, it may seem odd to "allocate" since it is meant to point to
+  // an existing entity; however, a pointee may be privatized, in which case
+  // issuing an allocation makes sense.
+ mlir::Value allocation;
+ if (std::is_same_v<Ty, fir::HeapType>) {
+ needsFree = true;
+ allocation = fir::AllocMemOp::create(builder, loc, pointeeTy);
+ } else {
+ needsFree = false;
+ allocation = fir::AllocaOp::create(builder, loc, pointeeTy);
+ }
+
+ // Convert to the requested pointer type if needed.
+ // This means converting from a fir.ref to either a fir.llvm_ptr or a fir.ptr.
+ // fir.heap is already correct type in this case.
+ if (allocation.getType() != pointer) {
+ assert(!(std::is_same_v<Ty, fir::HeapType>) &&
+ "fir.heap is already correct type because of allocmem");
+ return fir::ConvertOp::create(builder, loc, pointer, allocation);
+ }
+
+ return allocation;
+}
+
+template mlir::Value OpenACCPointerLikeModel<fir::ReferenceType>::genAllocate(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ llvm::StringRef varName, mlir::Type varType, mlir::Value originalVar,
+ bool &needsFree) const;
+
+template mlir::Value OpenACCPointerLikeModel<fir::PointerType>::genAllocate(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ llvm::StringRef varName, mlir::Type varType, mlir::Value originalVar,
+ bool &needsFree) const;
+
+template mlir::Value OpenACCPointerLikeModel<fir::HeapType>::genAllocate(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ llvm::StringRef varName, mlir::Type varType, mlir::Value originalVar,
+ bool &needsFree) const;
+
+template mlir::Value OpenACCPointerLikeModel<fir::LLVMPointerType>::genAllocate(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ llvm::StringRef varName, mlir::Type varType, mlir::Value originalVar,
+ bool &needsFree) const;
+
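+/// Walk backwards through the defining ops of `value`, looking through
+/// fir.convert, view-like ops, and (when `stripDeclare` is true)
+/// hlfir.declare/fir.declare, to recover the value that was originally
+/// allocated.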
+static mlir::Value stripCasts(mlir::Value value, bool stripDeclare = true) {
+ mlir::Value currentValue = value;
+
+ while (currentValue) {
+ auto *definingOp = currentValue.getDefiningOp();
+ if (!definingOp)
+ break;
+
+ if (auto convertOp = mlir::dyn_cast<fir::ConvertOp>(definingOp)) {
+ currentValue = convertOp.getValue();
+ continue;
+ }
+
+ if (auto viewLike = mlir::dyn_cast<mlir::ViewLikeOpInterface>(definingOp)) {
+ currentValue = viewLike.getViewSource();
+ continue;
+ }
+
+ if (stripDeclare) {
+ if (auto declareOp = mlir::dyn_cast<hlfir::DeclareOp>(definingOp)) {
+ currentValue = declareOp.getMemref();
+ continue;
+ }
+
+ if (auto declareOp = mlir::dyn_cast<fir::DeclareOp>(definingOp)) {
+ currentValue = declareOp.getMemref();
+ continue;
+ }
+ }
+ break;
+ }
+
+ return currentValue;
+}
+
+template <typename Ty>
+bool OpenACCPointerLikeModel<Ty>::genFree(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::TypedValue<mlir::acc::PointerLikeType> varToFree,
+ mlir::Value allocRes, mlir::Type varType) const {
+
+ // Unwrap to get the pointee type.
+ mlir::Type pointeeTy = fir::dyn_cast_ptrEleTy(pointer);
+ assert(pointeeTy && "expected pointee type to be extractable");
+
+ // Box types contain both a descriptor and data. The `genFree` API
+ // handles simple deallocations and cannot properly manage both parts.
+ // Using `generatePrivateDestroy` instead can free both the descriptor and
+ // its referenced data.
+ if (fir::isa_box_type(pointeeTy))
+ return false;
+
+ // If pointer type is HeapType, assume it's a heap allocation
+ if (std::is_same_v<Ty, fir::HeapType>) {
+ fir::FreeMemOp::create(builder, loc, varToFree);
+ return true;
+ }
+
+ // Use allocRes if provided to determine the allocation type
+ mlir::Value valueToInspect = allocRes ? allocRes : varToFree;
+
+ // Strip casts and declare operations to find the original allocation
+ mlir::Value strippedValue = stripCasts(valueToInspect);
+ mlir::Operation *originalAlloc = strippedValue.getDefiningOp();
+
+ // If we found an AllocMemOp (heap allocation), free it
+ if (mlir::isa_and_nonnull<fir::AllocMemOp>(originalAlloc)) {
+ mlir::Value toFree = varToFree;
+ if (!mlir::isa<fir::HeapType>(valueToInspect.getType()))
+ toFree = fir::ConvertOp::create(
+ builder, loc,
+ fir::HeapType::get(varToFree.getType().getElementType()), toFree);
+ fir::FreeMemOp::create(builder, loc, toFree);
+ return true;
+ }
+
+ // If we found an AllocaOp (stack allocation), no deallocation needed
+ if (mlir::isa_and_nonnull<fir::AllocaOp>(originalAlloc))
+ return true;
+
+ // Unable to determine allocation type
+ return false;
+}
+
+template bool OpenACCPointerLikeModel<fir::ReferenceType>::genFree(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::TypedValue<mlir::acc::PointerLikeType> varToFree,
+ mlir::Value allocRes, mlir::Type varType) const;
+
+template bool OpenACCPointerLikeModel<fir::PointerType>::genFree(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::TypedValue<mlir::acc::PointerLikeType> varToFree,
+ mlir::Value allocRes, mlir::Type varType) const;
+
+template bool OpenACCPointerLikeModel<fir::HeapType>::genFree(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::TypedValue<mlir::acc::PointerLikeType> varToFree,
+ mlir::Value allocRes, mlir::Type varType) const;
+
+template bool OpenACCPointerLikeModel<fir::LLVMPointerType>::genFree(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::TypedValue<mlir::acc::PointerLikeType> varToFree,
+ mlir::Value allocRes, mlir::Type varType) const;
+
+template <typename Ty>
+bool OpenACCPointerLikeModel<Ty>::genCopy(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::TypedValue<mlir::acc::PointerLikeType> destination,
+ mlir::TypedValue<mlir::acc::PointerLikeType> source,
+ mlir::Type varType) const {
+
+ // Check that source and destination types match
+ if (source.getType() != destination.getType())
+ return false;
+
+ // Unwrap to get the pointee type.
+ mlir::Type pointeeTy = fir::dyn_cast_ptrEleTy(pointer);
+ assert(pointeeTy && "expected pointee type to be extractable");
+
+ // Box types contain both a descriptor and referenced data. The genCopy API
+ // handles simple copies and cannot properly manage both parts.
+ if (fir::isa_box_type(pointeeTy))
+ return false;
+
+ // Unlimited polymorphic (class(*)) cannot be handled because source and
+ // destination types are not known.
+ if (fir::isUnlimitedPolymorphicType(pointeeTy))
+ return false;
+
+ // Return false for dynamic size types because the copy logic
+ // cannot be determined simply from the type.
+ if (fir::hasDynamicSize(pointeeTy))
+ return false;
+
+ if (fir::isa_trivial(pointeeTy)) {
+ auto loadVal = fir::LoadOp::create(builder, loc, source);
+ fir::StoreOp::create(builder, loc, loadVal, destination);
+ } else {
+ hlfir::AssignOp::create(builder, loc, source, destination);
+ }
+ return true;
+}
+
+template bool OpenACCPointerLikeModel<fir::ReferenceType>::genCopy(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::TypedValue<mlir::acc::PointerLikeType> destination,
+ mlir::TypedValue<mlir::acc::PointerLikeType> source,
+ mlir::Type varType) const;
+
+template bool OpenACCPointerLikeModel<fir::PointerType>::genCopy(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::TypedValue<mlir::acc::PointerLikeType> destination,
+ mlir::TypedValue<mlir::acc::PointerLikeType> source,
+ mlir::Type varType) const;
+
+template bool OpenACCPointerLikeModel<fir::HeapType>::genCopy(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::TypedValue<mlir::acc::PointerLikeType> destination,
+ mlir::TypedValue<mlir::acc::PointerLikeType> source,
+ mlir::Type varType) const;
+
+template bool OpenACCPointerLikeModel<fir::LLVMPointerType>::genCopy(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::TypedValue<mlir::acc::PointerLikeType> destination,
+ mlir::TypedValue<mlir::acc::PointerLikeType> source,
+ mlir::Type varType) const;
+
} // namespace fir::acc
diff --git a/flang/lib/Optimizer/OpenMP/CMakeLists.txt b/flang/lib/Optimizer/OpenMP/CMakeLists.txt
index b85ee7e..23a7dc8 100644
--- a/flang/lib/Optimizer/OpenMP/CMakeLists.txt
+++ b/flang/lib/Optimizer/OpenMP/CMakeLists.txt
@@ -8,6 +8,7 @@ add_flang_library(FlangOpenMPTransforms
MapsForPrivatizedSymbols.cpp
MapInfoFinalization.cpp
MarkDeclareTarget.cpp
+ LowerWorkdistribute.cpp
LowerWorkshare.cpp
LowerNontemporal.cpp
SimdOnly.cpp
diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkdistribute.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkdistribute.cpp
new file mode 100644
index 0000000..9278e17
--- /dev/null
+++ b/flang/lib/Optimizer/OpenMP/LowerWorkdistribute.cpp
@@ -0,0 +1,1852 @@
+//===- LowerWorkdistribute.cpp --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the lowering and optimisations of omp.workdistribute.
+//
+// Fortran array statements are lowered to fir as fir.do_loop unordered.
+// lower-workdistribute pass works mainly on identifying fir.do_loop unordered
+// that is nested in target{teams{workdistribute{fir.do_loop unordered}}} and
+// lowers it to target{teams{parallel{distribute{wsloop{loop_nest}}}}}.
+// It hoists all the other ops outside target region.
+// Relaces heap allocation on target with omp.target_allocmem and
+// deallocation with omp.target_freemem from host. Also replaces
+// runtime function "Assign" with omp_target_memcpy.
+//
+//===----------------------------------------------------------------------===//
+
+#include "flang/Optimizer/Builder/FIRBuilder.h"
+#include "flang/Optimizer/Dialect/FIRDialect.h"
+#include "flang/Optimizer/Dialect/FIROps.h"
+#include "flang/Optimizer/Dialect/FIRType.h"
+#include "flang/Optimizer/HLFIR/Passes.h"
+#include "flang/Optimizer/OpenMP/Utils.h"
+#include "flang/Optimizer/Transforms/Passes.h"
+#include "mlir/Analysis/SliceAnalysis.h"
+#include "mlir/Dialect/OpenMP/OpenMPDialect.h"
+#include "mlir/IR/Builders.h"
+#include "mlir/IR/Value.h"
+#include "mlir/Transforms/DialectConversion.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
+#include "mlir/Transforms/RegionUtils.h"
+#include "llvm/Frontend/OpenMP/OMPConstants.h"
+#include <mlir/Dialect/Arith/IR/Arith.h>
+#include <mlir/Dialect/LLVMIR/LLVMTypes.h>
+#include <mlir/Dialect/Utils/IndexingUtils.h>
+#include <mlir/IR/BlockSupport.h>
+#include <mlir/IR/BuiltinOps.h>
+#include <mlir/IR/Diagnostics.h>
+#include <mlir/IR/IRMapping.h>
+#include <mlir/IR/PatternMatch.h>
+#include <mlir/Interfaces/SideEffectInterfaces.h>
+#include <mlir/Support/LLVM.h>
+#include <optional>
+#include <variant>
+
+namespace flangomp {
+#define GEN_PASS_DEF_LOWERWORKDISTRIBUTE
+#include "flang/Optimizer/OpenMP/Passes.h.inc"
+} // namespace flangomp
+
+#define DEBUG_TYPE "lower-workdistribute"
+
+using namespace mlir;
+
+namespace {
+
+/// This string is used to identify the Fortran-specific runtime FortranAAssign.
+static constexpr llvm::StringRef FortranAssignStr = "_FortranAAssign";
+
+/// The isRuntimeCall function is a utility designed to determine
+/// if a given operation is a call to a Fortran-specific runtime function.
+static bool isRuntimeCall(Operation *op) {
+ if (auto callOp = dyn_cast<fir::CallOp>(op)) {
+ auto callee = callOp.getCallee();
+ if (!callee)
+ return false;
+ auto *func = op->getParentOfType<ModuleOp>().lookupSymbol(*callee);
+ if (func->getAttr(fir::FIROpsDialect::getFirRuntimeAttrName()))
+ return true;
+ }
+ return false;
+}
+
+/// This is the single source of truth about whether we should parallelize an
+/// operation nested in an omp.workdistribute region.
+/// Parallelize here refers to dividing into units of work.
+static bool shouldParallelize(Operation *op) {
+ // True if the op is a runtime call to Assign
+ if (isRuntimeCall(op)) {
+ fir::CallOp runtimeCall = cast<fir::CallOp>(op);
+ auto funcName = runtimeCall.getCallee()->getRootReference().getValue();
+ if (funcName == FortranAssignStr) {
+ return true;
+ }
+ }
+  // We cannot parallelize ops with side effects.
+  // Parallelizable operations should not produce
+  // values that other operations depend on.
+ if (llvm::any_of(op->getResults(),
+ [](OpResult v) -> bool { return !v.use_empty(); }))
+ return false;
+ // We will parallelize unordered loops - these come from array syntax
+ if (auto loop = dyn_cast<fir::DoLoopOp>(op)) {
+ auto unordered = loop.getUnordered();
+ if (!unordered)
+ return false;
+ return *unordered;
+ }
+ // We cannot parallelize anything else.
+ return false;
+}
+
+/// The getPerfectlyNested function is a generic utility for finding
+/// a single, "perfectly nested" operation within a parent operation.
+template <typename T>
+static T getPerfectlyNested(Operation *op) {
+ if (op->getNumRegions() != 1)
+ return nullptr;
+ auto &region = op->getRegion(0);
+ if (region.getBlocks().size() != 1)
+ return nullptr;
+ auto *block = &region.front();
+ auto *firstOp = &block->front();
+ if (auto nested = dyn_cast<T>(firstOp))
+ if (firstOp->getNextNode() == block->getTerminator())
+ return nested;
+ return nullptr;
+}
+
+/// verifyTargetTeamsWorkdistribute method verifies that
+/// omp.target { teams { workdistribute { ... } } } is well formed
+/// and fails for function calls that don't have lowering implemented yet.
+static LogicalResult
+verifyTargetTeamsWorkdistribute(omp::WorkdistributeOp workdistribute) {
+ OpBuilder rewriter(workdistribute);
+ auto loc = workdistribute->getLoc();
+ auto teams = dyn_cast<omp::TeamsOp>(workdistribute->getParentOp());
+ if (!teams) {
+ emitError(loc, "workdistribute not nested in teams\n");
+ return failure();
+ }
+ if (workdistribute.getRegion().getBlocks().size() != 1) {
+ emitError(loc, "workdistribute with multiple blocks\n");
+ return failure();
+ }
+ if (teams.getRegion().getBlocks().size() != 1) {
+ emitError(loc, "teams with multiple blocks\n");
+ return failure();
+ }
+
+ bool foundWorkdistribute = false;
+ for (auto &op : teams.getOps()) {
+ if (isa<omp::WorkdistributeOp>(op)) {
+ if (foundWorkdistribute) {
+ emitError(loc, "teams has multiple workdistribute ops.\n");
+ return failure();
+ }
+ foundWorkdistribute = true;
+ continue;
+ }
+ // Identify any omp dialect ops present before/after workdistribute.
+ if (op.getDialect() && isa<omp::OpenMPDialect>(op.getDialect()) &&
+ !isa<omp::TerminatorOp>(op)) {
+ emitError(loc, "teams has omp ops other than workdistribute. Lowering "
+ "not implemented yet.\n");
+ return failure();
+ }
+ }
+
+ omp::TargetOp targetOp = dyn_cast<omp::TargetOp>(teams->getParentOp());
+ // return if not omp.target
+ if (!targetOp)
+ return success();
+
+ for (auto &op : workdistribute.getOps()) {
+ if (auto callOp = dyn_cast<fir::CallOp>(op)) {
+ if (isRuntimeCall(&op)) {
+ auto funcName = (*callOp.getCallee()).getRootReference().getValue();
+ // _FortranAAssign is handled. Other runtime calls are not supported
+ // in omp.workdistribute yet.
+ if (funcName == FortranAssignStr)
+ continue;
+ else {
+ emitError(loc, "Runtime call " + funcName +
+ " lowering not supported for workdistribute yet.");
+ return failure();
+ }
+ }
+ }
+ }
+ return success();
+}
+
+/// The fissionWorkdistribute method finds the parallelizable ops
+/// within the teams {workdistribute} region and moves each of them
+/// into its own teams {workdistribute} region.
+///
+/// If B() and D() are parallelizable,
+///
+/// omp.teams {
+///   omp.workdistribute {
+///     A()
+///     B()
+///     C()
+///     D()
+///     E()
+///   }
+/// }
+///
+/// becomes
+///
+/// A()
+/// omp.teams {
+///   omp.workdistribute {
+///     B()
+///   }
+/// }
+/// C()
+/// omp.teams {
+///   omp.workdistribute {
+///     D()
+///   }
+/// }
+/// E()
+static FailureOr<bool>
+fissionWorkdistribute(omp::WorkdistributeOp workdistribute) {
+ OpBuilder rewriter(workdistribute);
+ auto loc = workdistribute->getLoc();
+ auto teams = dyn_cast<omp::TeamsOp>(workdistribute->getParentOp());
+ auto *teamsBlock = &teams.getRegion().front();
+ bool changed = false;
+ // Move the ops inside teams and before workdistribute outside.
+ IRMapping irMapping;
+ llvm::SmallVector<Operation *> teamsHoisted;
+ for (auto &op : teams.getOps()) {
+ if (&op == workdistribute) {
+ break;
+ }
+ if (shouldParallelize(&op)) {
+ emitError(loc, "teams has parallelize ops before first workdistribute\n");
+ return failure();
+ } else {
+ rewriter.setInsertionPoint(teams);
+ rewriter.clone(op, irMapping);
+ teamsHoisted.push_back(&op);
+ changed = true;
+ }
+ }
+ for (auto *op : llvm::reverse(teamsHoisted)) {
+ op->replaceAllUsesWith(irMapping.lookup(op));
+ op->erase();
+ }
+
+ // While we have unhandled operations in the original workdistribute
+ auto *workdistributeBlock = &workdistribute.getRegion().front();
+ auto *terminator = workdistributeBlock->getTerminator();
+ while (&workdistributeBlock->front() != terminator) {
+ rewriter.setInsertionPoint(teams);
+ IRMapping mapping;
+ llvm::SmallVector<Operation *> hoisted;
+ Operation *parallelize = nullptr;
+ for (auto &op : workdistribute.getOps()) {
+ if (&op == terminator) {
+ break;
+ }
+ if (shouldParallelize(&op)) {
+ parallelize = &op;
+ break;
+ } else {
+ rewriter.clone(op, mapping);
+ hoisted.push_back(&op);
+ changed = true;
+ }
+ }
+
+ for (auto *op : llvm::reverse(hoisted)) {
+ op->replaceAllUsesWith(mapping.lookup(op));
+ op->erase();
+ }
+
+ if (parallelize && hoisted.empty() &&
+ parallelize->getNextNode() == terminator)
+ break;
+ if (parallelize) {
+ auto newTeams = rewriter.cloneWithoutRegions(teams);
+ auto *newTeamsBlock = rewriter.createBlock(
+ &newTeams.getRegion(), newTeams.getRegion().begin(), {}, {});
+ for (auto arg : teamsBlock->getArguments())
+ newTeamsBlock->addArgument(arg.getType(), arg.getLoc());
+ auto newWorkdistribute = rewriter.create<omp::WorkdistributeOp>(loc);
+ rewriter.create<omp::TerminatorOp>(loc);
+ rewriter.createBlock(&newWorkdistribute.getRegion(),
+ newWorkdistribute.getRegion().begin(), {}, {});
+ auto *cloned = rewriter.clone(*parallelize);
+ parallelize->replaceAllUsesWith(cloned);
+ parallelize->erase();
+ rewriter.create<omp::TerminatorOp>(loc);
+ changed = true;
+ }
+ }
+ return changed;
+}
+
+/// Generate omp.parallel operation with an empty region.
+static void genParallelOp(Location loc, OpBuilder &rewriter, bool composite) {
+ auto parallelOp = rewriter.create<mlir::omp::ParallelOp>(loc);
+ parallelOp.setComposite(composite);
+ rewriter.createBlock(&parallelOp.getRegion());
+ rewriter.setInsertionPoint(rewriter.create<mlir::omp::TerminatorOp>(loc));
+ return;
+}
+
+/// Generate omp.distribute operation with an empty region.
+static void genDistributeOp(Location loc, OpBuilder &rewriter, bool composite) {
+ mlir::omp::DistributeOperands distributeClauseOps;
+ auto distributeOp =
+ rewriter.create<mlir::omp::DistributeOp>(loc, distributeClauseOps);
+ distributeOp.setComposite(composite);
+ auto distributeBlock = rewriter.createBlock(&distributeOp.getRegion());
+ rewriter.setInsertionPointToStart(distributeBlock);
+ return;
+}
+
+/// Generate loop nest clause operands from fir.do_loop operation.
+static void
+genLoopNestClauseOps(OpBuilder &rewriter, fir::DoLoopOp loop,
+ mlir::omp::LoopNestOperands &loopNestClauseOps) {
+ assert(loopNestClauseOps.loopLowerBounds.empty() &&
+ "Loop nest bounds were already emitted!");
+ loopNestClauseOps.loopLowerBounds.push_back(loop.getLowerBound());
+ loopNestClauseOps.loopUpperBounds.push_back(loop.getUpperBound());
+ loopNestClauseOps.loopSteps.push_back(loop.getStep());
+ loopNestClauseOps.loopInclusive = rewriter.getUnitAttr();
+}
+
+/// Generate omp.wsloop operation with an empty region and
+/// clone the body of fir.do_loop operation inside the loop nest region.
+static void genWsLoopOp(mlir::OpBuilder &rewriter, fir::DoLoopOp doLoop,
+ const mlir::omp::LoopNestOperands &clauseOps,
+ bool composite) {
+
+ auto wsloopOp = rewriter.create<mlir::omp::WsloopOp>(doLoop.getLoc());
+ wsloopOp.setComposite(composite);
+ rewriter.createBlock(&wsloopOp.getRegion());
+
+ auto loopNestOp =
+ rewriter.create<mlir::omp::LoopNestOp>(doLoop.getLoc(), clauseOps);
+
+ // Clone the loop's body inside the loop nest construct using the
+ // mapped values.
+ rewriter.cloneRegionBefore(doLoop.getRegion(), loopNestOp.getRegion(),
+ loopNestOp.getRegion().begin());
+ Block *clonedBlock = &loopNestOp.getRegion().back();
+ mlir::Operation *terminatorOp = clonedBlock->getTerminator();
+
+ // Erase fir.result op of do loop and create yield op.
+ if (auto resultOp = dyn_cast<fir::ResultOp>(terminatorOp)) {
+ rewriter.setInsertionPoint(terminatorOp);
+ rewriter.create<mlir::omp::YieldOp>(doLoop->getLoc());
+ terminatorOp->erase();
+ }
+}
+
+/// The workdistributeDoLower method finds the fir.do_loop unordered
+/// nested in teams {workdistribute {fir.do_loop unordered}} and
+/// lowers it to teams {parallel {distribute {wsloop {loop_nest}}}}.
+///
+/// If a fir.do_loop unordered is present inside teams workdistribute
+///
+/// omp.teams {
+///   omp.workdistribute {
+///     fir.do_loop unordered {
+///       ...
+///     }
+///   }
+/// }
+///
+/// then it is lowered to
+///
+/// omp.teams {
+///   omp.parallel {
+///     omp.distribute {
+///       omp.wsloop {
+///         omp.loop_nest {
+///           ...
+///         }
+///       }
+///     }
+///   }
+/// }
+static bool
+workdistributeDoLower(omp::WorkdistributeOp workdistribute,
+ SetVector<omp::TargetOp> &targetOpsToProcess) {
+ OpBuilder rewriter(workdistribute);
+ auto doLoop = getPerfectlyNested<fir::DoLoopOp>(workdistribute);
+ auto wdLoc = workdistribute->getLoc();
+ if (doLoop && shouldParallelize(doLoop)) {
+ assert(doLoop.getReduceOperands().empty());
+
+ // Record the target ops to process later
+ if (auto teamsOp = dyn_cast<omp::TeamsOp>(workdistribute->getParentOp())) {
+ auto targetOp = dyn_cast<omp::TargetOp>(teamsOp->getParentOp());
+ if (targetOp) {
+ targetOpsToProcess.insert(targetOp);
+ }
+ }
+ // Generate the nested parallel, distribute, wsloop and loop_nest ops.
+ genParallelOp(wdLoc, rewriter, true);
+ genDistributeOp(wdLoc, rewriter, true);
+ mlir::omp::LoopNestOperands loopNestClauseOps;
+ genLoopNestClauseOps(rewriter, doLoop, loopNestClauseOps);
+ genWsLoopOp(rewriter, doLoop, loopNestClauseOps, true);
+ workdistribute.erase();
+ return true;
+ }
+ return false;
+}
+
+/// Check whether the type is a fir.ref whose element type is a fir.box that
+/// encloses an array.
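+/// For example (illustrative types only), this returns true for
+/// !fir.ref<!fir.box<!fir.array<?xf32>>> and false for
+/// !fir.ref<!fir.box<f32>>.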
+static bool isEnclosedTypeRefToBoxArray(Type type) {
+ // Check if it's a reference type
+ if (auto refType = dyn_cast<fir::ReferenceType>(type)) {
+ // Get the referenced type (should be fir.box)
+ auto referencedType = refType.getEleTy();
+ // Check if referenced type is a box
+ if (auto boxType = dyn_cast<fir::BoxType>(referencedType)) {
+ // Get the boxed type and check if it's an array
+ auto boxedType = boxType.getEleTy();
+ // Check if boxed type is a sequence (array)
+ return isa<fir::SequenceType>(boxedType);
+ }
+ }
+ return false;
+}
+
+/// Check whether the type is a fir.box whose element type is a scalar (not an
+/// array).
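+/// For example (illustrative types only), this returns true for !fir.box<f32>
+/// and false for !fir.box<!fir.array<?xf32>>.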
+static bool isEnclosedTypeBoxScalar(Type type) {
+ // Check if it's a box type
+ if (auto boxType = dyn_cast<fir::BoxType>(type)) {
+ // Get the boxed type
+ auto boxedType = boxType.getEleTy();
+ // Check if boxed type is NOT a sequence (array)
+ return !isa<fir::SequenceType>(boxedType);
+ }
+ return false;
+}
+
+/// Check if the FortranAAssign call has src as scalar and dest as array
+static bool isFortranAssignSrcScalarAndDestArray(fir::CallOp callOp) {
+ if (callOp.getNumOperands() < 2)
+ return false;
+ auto srcArg = callOp.getOperand(1);
+ auto destArg = callOp.getOperand(0);
+ // Both operands should be fir.convert ops
+ auto srcConvert = srcArg.getDefiningOp<fir::ConvertOp>();
+ auto destConvert = destArg.getDefiningOp<fir::ConvertOp>();
+ if (!srcConvert || !destConvert) {
+ emitError(callOp->getLoc(),
+ "Unimplemented: FortranAssign to OpenMP lowering\n");
+ return false;
+ }
+ // Get the original types before conversion
+ auto srcOrigType = srcConvert.getValue().getType();
+ auto destOrigType = destConvert.getValue().getType();
+
+ // Check if src is scalar and dest is array
+ bool srcIsScalar = isEnclosedTypeBoxScalar(srcOrigType);
+ bool destIsArray = isEnclosedTypeRefToBoxArray(destOrigType);
+ return srcIsScalar && destIsArray;
+}
+
+/// Convert a flat index to multi-dimensional indices for an array box
+/// Example: 2D array with shape (2,4)
+/// Col 1 Col 2 Col 3 Col 4
+/// Row 1: (1,1) (1,2) (1,3) (1,4)
+/// Row 2: (2,1) (2,2) (2,3) (2,4)
+///
+/// extents: (2,4)
+///
+/// flatIdx: 0 1 2 3 4 5 6 7
+/// Indices: (1,1) (1,2) (1,3) (1,4) (2,1) (2,2) (2,3) (2,4)
+static SmallVector<Value> convertFlatToMultiDim(OpBuilder &builder,
+ Location loc, Value flatIdx,
+ Value arrayBox) {
+ // Get array type and rank
+ auto boxType = cast<fir::BoxType>(arrayBox.getType());
+ auto seqType = cast<fir::SequenceType>(boxType.getEleTy());
+ int rank = seqType.getDimension();
+
+ // Get all extents
+ SmallVector<Value> extents;
+ // Get extents for each dimension
+ for (int i = 0; i < rank; ++i) {
+ auto dimIdx = arith::ConstantIndexOp::create(builder, loc, i);
+ auto boxDims = fir::BoxDimsOp::create(builder, loc, arrayBox, dimIdx);
+ extents.push_back(boxDims.getResult(1));
+ }
+
+ // Convert flat index to multi-dimensional indices
+ SmallVector<Value> indices(rank);
+ Value temp = flatIdx;
+ auto c1 = builder.create<arith::ConstantIndexOp>(loc, 1);
+
+ // Work backwards through dimensions (row-major order)
+ for (int i = rank - 1; i >= 0; --i) {
+ Value zeroBasedIdx = builder.create<arith::RemSIOp>(loc, temp, extents[i]);
+ // Convert to one-based index
+ indices[i] = builder.create<arith::AddIOp>(loc, zeroBasedIdx, c1);
+ if (i > 0) {
+ temp = builder.create<arith::DivSIOp>(loc, temp, extents[i]);
+ }
+ }
+
+ return indices;
+}
+
+/// Calculate the total number of elements in the array box
+/// (totalElems = extent(1) * extent(2) * ... * extent(n))
+static Value calculateTotalElements(OpBuilder &builder, Location loc,
+ Value arrayBox) {
+ auto boxType = cast<fir::BoxType>(arrayBox.getType());
+ auto seqType = cast<fir::SequenceType>(boxType.getEleTy());
+ int rank = seqType.getDimension();
+
+ Value totalElems = nullptr;
+ for (int i = 0; i < rank; ++i) {
+ auto dimIdx = arith::ConstantIndexOp::create(builder, loc, i);
+ auto boxDims = fir::BoxDimsOp::create(builder, loc, arrayBox, dimIdx);
+ Value extent = boxDims.getResult(1);
+ if (i == 0) {
+ totalElems = extent;
+ } else {
+ totalElems = builder.create<arith::MulIOp>(loc, totalElems, extent);
+ }
+ }
+ return totalElems;
+}
+
+/// Replace the FortranAAssign runtime call with an unordered do loop
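+///
+/// Schematically (illustrative only; operand and type details omitted), a call
+/// such as
+///
+///   fir.call @_FortranAAssign(%dest_box, %src_box, ...)
+///
+/// inside teams { workdistribute { } } is replaced by a flattened unordered
+/// loop. The box loads and extent computation are emitted before the enclosing
+/// teams op, and the loop is emitted inside the workdistribute region:
+///
+///   %scalar = fir.load (fir.box_addr %src_box)
+///   %n      = <total number of elements of %dest>
+///   fir.do_loop %i = %c0 to %n step %c1 unordered {
+///     %indices = <convert flat %i to multi-dimensional indices>
+///     %addr    = fir.array_coor %dest, %indices
+///     fir.store %scalar to %addr
+///   }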
+static void replaceWithUnorderedDoLoop(OpBuilder &builder, Location loc,
+ omp::TeamsOp teamsOp,
+ omp::WorkdistributeOp workdistribute,
+ fir::CallOp callOp) {
+ auto destConvert = callOp.getOperand(0).getDefiningOp<fir::ConvertOp>();
+ auto srcConvert = callOp.getOperand(1).getDefiningOp<fir::ConvertOp>();
+
+ Value destBox = destConvert.getValue();
+ Value srcBox = srcConvert.getValue();
+
+ // Get the defining alloca op of destBox.
+ auto destAlloca = destBox.getDefiningOp<fir::AllocaOp>();
+
+ if (!destAlloca) {
+ emitError(loc, "Unimplemented: FortranAssign to OpenMP lowering\n");
+ return;
+ }
+
+ // get the store op that stores to the alloca
+ for (auto user : destAlloca->getUsers()) {
+ if (auto storeOp = dyn_cast<fir::StoreOp>(user)) {
+ destBox = storeOp.getValue();
+ break;
+ }
+ }
+
+ builder.setInsertionPoint(teamsOp);
+ // Load destination array box (if it's a reference)
+ Value arrayBox = destBox;
+ if (isa<fir::ReferenceType>(destBox.getType()))
+ arrayBox = builder.create<fir::LoadOp>(loc, destBox);
+
+ auto scalarValue = builder.create<fir::BoxAddrOp>(loc, srcBox);
+ Value scalar = builder.create<fir::LoadOp>(loc, scalarValue);
+
+ // Calculate total number of elements (flattened)
+ auto c0 = builder.create<arith::ConstantIndexOp>(loc, 0);
+ auto c1 = builder.create<arith::ConstantIndexOp>(loc, 1);
+ Value totalElems = calculateTotalElements(builder, loc, arrayBox);
+
+ auto *workdistributeBlock = &workdistribute.getRegion().front();
+ builder.setInsertionPointToStart(workdistributeBlock);
+ // Create single unordered loop for flattened array
+ auto doLoop = fir::DoLoopOp::create(builder, loc, c0, totalElems, c1, true);
+ Block *loopBlock = &doLoop.getRegion().front();
+ builder.setInsertionPointToStart(doLoop.getBody());
+
+ auto flatIdx = loopBlock->getArgument(0);
+ SmallVector<Value> indices =
+ convertFlatToMultiDim(builder, loc, flatIdx, arrayBox);
+ // Use fir.array_coor to address the element at the computed indices.
+ auto elemPtr = fir::ArrayCoorOp::create(
+ builder, loc, fir::ReferenceType::get(scalar.getType()), arrayBox,
+ nullptr, nullptr, ValueRange{indices}, ValueRange{});
+
+ builder.create<fir::StoreOp>(loc, scalar, elemPtr);
+}
+
+/// workdistributeRuntimeCallLower finds the runtime calls
+/// nested in teams {workdistribute{}} and
+/// lowers FortranAAssign to an unordered do loop if the source is a scalar and
+/// the destination is an array. Other runtime calls are currently not handled.
+static FailureOr<bool>
+workdistributeRuntimeCallLower(omp::WorkdistributeOp workdistribute,
+ SetVector<omp::TargetOp> &targetOpsToProcess) {
+ OpBuilder rewriter(workdistribute);
+ auto loc = workdistribute->getLoc();
+ auto teams = dyn_cast<omp::TeamsOp>(workdistribute->getParentOp());
+ if (!teams) {
+ emitError(loc, "workdistribute not nested in teams\n");
+ return failure();
+ }
+ if (workdistribute.getRegion().getBlocks().size() != 1) {
+ emitError(loc, "workdistribute with multiple blocks\n");
+ return failure();
+ }
+ if (teams.getRegion().getBlocks().size() != 1) {
+ emitError(loc, "teams with multiple blocks\n");
+ return failure();
+ }
+ bool changed = false;
+ // Get the target op parent of teams
+ omp::TargetOp targetOp = dyn_cast<omp::TargetOp>(teams->getParentOp());
+ SmallVector<Operation *> opsToErase;
+ for (auto &op : workdistribute.getOps()) {
+ if (isRuntimeCall(&op)) {
+ rewriter.setInsertionPoint(&op);
+ fir::CallOp runtimeCall = cast<fir::CallOp>(op);
+ auto funcName = runtimeCall.getCallee()->getRootReference().getValue();
+ if (funcName == FortranAssignStr) {
+ if (isFortranAssignSrcScalarAndDestArray(runtimeCall) && targetOp) {
+ // Record the target ops to process later
+ targetOpsToProcess.insert(targetOp);
+ replaceWithUnorderedDoLoop(rewriter, loc, teams, workdistribute,
+ runtimeCall);
+ opsToErase.push_back(&op);
+ changed = true;
+ }
+ }
+ }
+ }
+ // Erase the runtime calls that have been replaced.
+ for (auto *op : opsToErase) {
+ op->erase();
+ }
+ return changed;
+}
+
+/// teamsWorkdistributeToSingleOp hoists all the ops inside
+/// teams {workdistribute{}} to before the teams op.
+///
+/// If A() and B() are present inside teams workdistribute
+///
+/// omp.teams {
+/// omp.workdistribute {
+/// A()
+/// B()
+/// }
+/// }
+///
+/// Then, it is lowered to
+///
+/// A()
+/// B()
+///
+/// If only the terminator remains in teams after hoisting, we erase teams op.
+static bool
+teamsWorkdistributeToSingleOp(omp::TeamsOp teamsOp,
+ SetVector<omp::TargetOp> &targetOpsToProcess) {
+ auto workdistributeOp = getPerfectlyNested<omp::WorkdistributeOp>(teamsOp);
+ if (!workdistributeOp)
+ return false;
+ // Get the block containing teamsOp (the parent block).
+ Block *parentBlock = teamsOp->getBlock();
+ Block &workdistributeBlock = *workdistributeOp.getRegion().begin();
+ // Record the target ops to process later
+ for (auto &op : workdistributeBlock.getOperations()) {
+ if (shouldParallelize(&op)) {
+ auto targetOp = dyn_cast<omp::TargetOp>(teamsOp->getParentOp());
+ if (targetOp) {
+ targetOpsToProcess.insert(targetOp);
+ }
+ }
+ }
+ auto insertPoint = Block::iterator(teamsOp);
+ // Get the range of operations to move (excluding the terminator).
+ auto workdistributeBegin = workdistributeBlock.begin();
+ auto workdistributeEnd = workdistributeBlock.getTerminator()->getIterator();
+ // Move the operations from workdistribute block to before teamsOp.
+ parentBlock->getOperations().splice(insertPoint,
+ workdistributeBlock.getOperations(),
+ workdistributeBegin, workdistributeEnd);
+ // Erase the now-empty workdistributeOp.
+ workdistributeOp.erase();
+ Block &teamsBlock = *teamsOp.getRegion().begin();
+ // Check if only the terminator remains and erase teams op.
+ if (teamsBlock.getOperations().size() == 1 &&
+ teamsBlock.getTerminator() != nullptr) {
+ teamsOp.erase();
+ }
+ return true;
+}
+
+/// If multiple workdistribute ops are nested in a target region, we will need
+/// to split the target region, but we want to preserve the data semantics of
+/// the original data region and avoid unnecessary data movement at each of the
+/// subkernels. We therefore split the target region into a target_data{target}
+/// nest where only the outer op moves the data.
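+///
+/// Schematically (illustrative only; map clauses and attributes abbreviated):
+///
+///   omp.target map(%a, %b) { ... }
+///
+/// becomes
+///
+///   omp.target_data map(to/from: by-ref vars %a, %b) {
+///     omp.target map(%a, %b with data-movement flags cleared for by-ref
+///                    captures) { ... }
+///   }
+///
+/// so the data transfers happen once on the outer omp.target_data, and the
+/// inner (later split) target regions reuse the already-mapped storage.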
+FailureOr<omp::TargetOp> splitTargetData(omp::TargetOp targetOp,
+ RewriterBase &rewriter) {
+ auto loc = targetOp->getLoc();
+ if (targetOp.getMapVars().empty()) {
+ emitError(loc, "Target region has no data maps\n");
+ return failure();
+ }
+ // Collect all the mapinfo ops
+ SmallVector<omp::MapInfoOp> mapInfos;
+ for (auto opr : targetOp.getMapVars()) {
+ auto mapInfo = cast<omp::MapInfoOp>(opr.getDefiningOp());
+ mapInfos.push_back(mapInfo);
+ }
+
+ rewriter.setInsertionPoint(targetOp);
+ SmallVector<Value> innerMapInfos;
+ SmallVector<Value> outerMapInfos;
+ // Create new mapinfo ops for the inner target region
+ for (auto mapInfo : mapInfos) {
+ auto originalMapType =
+ (llvm::omp::OpenMPOffloadMappingFlags)(mapInfo.getMapType());
+ auto originalCaptureType = mapInfo.getMapCaptureType();
+ llvm::omp::OpenMPOffloadMappingFlags newMapType;
+ mlir::omp::VariableCaptureKind newCaptureType;
+ // For bycopy, we keep the same map type and capture type
+ // For byref, we change the map type to none and keep the capture type
+ if (originalCaptureType == mlir::omp::VariableCaptureKind::ByCopy) {
+ newMapType = originalMapType;
+ newCaptureType = originalCaptureType;
+ } else if (originalCaptureType == mlir::omp::VariableCaptureKind::ByRef) {
+ newMapType = llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_NONE;
+ newCaptureType = originalCaptureType;
+ outerMapInfos.push_back(mapInfo);
+ } else {
+ emitError(targetOp->getLoc(), "Unhandled case");
+ return failure();
+ }
+ auto innerMapInfo = cast<omp::MapInfoOp>(rewriter.clone(*mapInfo));
+ innerMapInfo.setMapTypeAttr(rewriter.getIntegerAttr(
+ rewriter.getIntegerType(64, false),
+ static_cast<
+ std::underlying_type_t<llvm::omp::OpenMPOffloadMappingFlags>>(
+ newMapType)));
+ innerMapInfo.setMapCaptureType(newCaptureType);
+ innerMapInfos.push_back(innerMapInfo.getResult());
+ }
+
+ rewriter.setInsertionPoint(targetOp);
+ auto device = targetOp.getDevice();
+ auto ifExpr = targetOp.getIfExpr();
+ auto deviceAddrVars = targetOp.getHasDeviceAddrVars();
+ auto devicePtrVars = targetOp.getIsDevicePtrVars();
+ // Create the target data op
+ auto targetDataOp = rewriter.create<omp::TargetDataOp>(
+ loc, device, ifExpr, outerMapInfos, deviceAddrVars, devicePtrVars);
+ auto targetDataBlock = rewriter.createBlock(&targetDataOp.getRegion());
+ rewriter.create<mlir::omp::TerminatorOp>(loc);
+ rewriter.setInsertionPointToStart(targetDataBlock);
+ // Create the inner target op
+ auto newTargetOp = rewriter.create<omp::TargetOp>(
+ targetOp.getLoc(), targetOp.getAllocateVars(),
+ targetOp.getAllocatorVars(), targetOp.getBareAttr(),
+ targetOp.getDependKindsAttr(), targetOp.getDependVars(),
+ targetOp.getDevice(), targetOp.getHasDeviceAddrVars(),
+ targetOp.getHostEvalVars(), targetOp.getIfExpr(),
+ targetOp.getInReductionVars(), targetOp.getInReductionByrefAttr(),
+ targetOp.getInReductionSymsAttr(), targetOp.getIsDevicePtrVars(),
+ innerMapInfos, targetOp.getNowaitAttr(), targetOp.getPrivateVars(),
+ targetOp.getPrivateSymsAttr(), targetOp.getPrivateNeedsBarrierAttr(),
+ targetOp.getThreadLimit(), targetOp.getPrivateMapsAttr());
+ rewriter.inlineRegionBefore(targetOp.getRegion(), newTargetOp.getRegion(),
+ newTargetOp.getRegion().begin());
+ rewriter.replaceOp(targetOp, targetDataOp);
+ return newTargetOp;
+}
+
+/// getNestedOpToIsolate identifies the teams op within the body of an
+/// omp::TargetOp that should be "isolated". It returns a tuple of the op
+/// together with flags indicating whether that op is the first and/or the
+/// last op in the target block.
+static std::optional<std::tuple<Operation *, bool, bool>>
+getNestedOpToIsolate(omp::TargetOp targetOp) {
+ if (targetOp.getRegion().empty())
+ return std::nullopt;
+ auto *targetBlock = &targetOp.getRegion().front();
+ for (auto &op : *targetBlock) {
+ bool first = &op == &*targetBlock->begin();
+ bool last = op.getNextNode() == targetBlock->getTerminator();
+ if (first && last)
+ return std::nullopt;
+
+ if (isa<omp::TeamsOp>(&op))
+ return {{&op, first, last}};
+ }
+ return std::nullopt;
+}
+
+/// Temporary structure to hold the two mapinfo ops
+struct TempOmpVar {
+ omp::MapInfoOp from, to;
+};
+
+/// isPtr checks if the type is a pointer or reference type.
+static bool isPtr(Type ty) {
+ return isa<fir::ReferenceType>(ty) || isa<LLVM::LLVMPointerType>(ty);
+}
+
+/// getPtrTypeForOmp returns an LLVM pointer type if the given type is already
+/// pointer-like, and a fir.ref of the given type otherwise.
+static Type getPtrTypeForOmp(Type ty) {
+ if (isPtr(ty))
+ return LLVM::LLVMPointerType::get(ty.getContext());
+ else
+ return fir::ReferenceType::get(ty);
+}
+
+/// allocateTempOmpVar allocates a temporary variable for OpenMP mapping
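+///
+/// Schematically (illustrative only; attributes abbreviated, not exact
+/// syntax), for a cached value of type T this creates
+///
+///   %tmp  = fir.alloca T                  // llvm.alloca for pointer-like T
+///   %from = omp.map.info varPtr(%tmp) map_clauses(from) capture(ByRef) ...
+///   %to   = omp.map.info varPtr(%tmp) map_clauses(to) capture(ByRef) ...
+///
+/// The "from" map is attached to the target region that produces the value and
+/// the "to" map to the later region(s) that reload it.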
+static TempOmpVar allocateTempOmpVar(Location loc, Type ty,
+ RewriterBase &rewriter) {
+ MLIRContext &ctx = *ty.getContext();
+ Value alloc;
+ Type allocType;
+ auto llvmPtrTy = LLVM::LLVMPointerType::get(&ctx);
+ // Get the appropriate type for allocation
+ if (isPtr(ty)) {
+ Type intTy = rewriter.getI32Type();
+ auto one = rewriter.create<LLVM::ConstantOp>(loc, intTy, 1);
+ allocType = llvmPtrTy;
+ alloc = rewriter.create<LLVM::AllocaOp>(loc, llvmPtrTy, allocType, one);
+ allocType = intTy;
+ } else {
+ allocType = ty;
+ alloc = rewriter.create<fir::AllocaOp>(loc, allocType);
+ }
+ // Lambda to create mapinfo ops
+ auto getMapInfo = [&](uint64_t mappingFlags, const char *name) {
+ return rewriter.create<omp::MapInfoOp>(
+ loc, alloc.getType(), alloc, TypeAttr::get(allocType),
+ rewriter.getIntegerAttr(rewriter.getIntegerType(64, /*isSigned=*/false),
+ mappingFlags),
+ rewriter.getAttr<omp::VariableCaptureKindAttr>(
+ omp::VariableCaptureKind::ByRef),
+ /*varPtrPtr=*/Value{},
+ /*members=*/SmallVector<Value>{},
+ /*member_index=*/mlir::ArrayAttr{},
+ /*bounds=*/ValueRange(),
+ /*mapperId=*/mlir::FlatSymbolRefAttr(),
+ /*name=*/rewriter.getStringAttr(name), rewriter.getBoolAttr(false));
+ };
+ // Create mapinfo ops.
+ uint64_t mapFrom =
+ static_cast<std::underlying_type_t<llvm::omp::OpenMPOffloadMappingFlags>>(
+ llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_FROM);
+ uint64_t mapTo =
+ static_cast<std::underlying_type_t<llvm::omp::OpenMPOffloadMappingFlags>>(
+ llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO);
+ auto mapInfoFrom = getMapInfo(mapFrom, "__flang_workdistribute_from");
+ auto mapInfoTo = getMapInfo(mapTo, "__flang_workdistribute_to");
+ return TempOmpVar{mapInfoFrom, mapInfoTo};
+}
+
+/// usedOutsideSplit checks if a value is used outside the split operation.
+static bool usedOutsideSplit(Value v, Operation *split) {
+ if (!split)
+ return false;
+ auto targetOp = cast<omp::TargetOp>(split->getParentOp());
+ auto *targetBlock = &targetOp.getRegion().front();
+ for (auto *user : v.getUsers()) {
+ while (user->getBlock() != targetBlock) {
+ user = user->getParentOp();
+ }
+ if (!user->isBeforeInBlock(split))
+ return true;
+ }
+ return false;
+}
+
+/// isRecomputableAfterFission checks if an operation can be recomputed
+static bool isRecomputableAfterFission(Operation *op, Operation *splitBefore) {
+ // If the op has side effects, it cannot be recomputed.
+ // We consider fir.declare as having no side effects.
+ return isa<fir::DeclareOp>(op) || isMemoryEffectFree(op);
+}
+
+/// collectNonRecomputableDeps walks the defining ops of a required value,
+/// marking non-recomputable ones for caching and the rest for recomputation.
+static void collectNonRecomputableDeps(Value &v, omp::TargetOp targetOp,
+ SetVector<Operation *> &nonRecomputable,
+ SetVector<Operation *> &toCache,
+ SetVector<Operation *> &toRecompute) {
+ Operation *op = v.getDefiningOp();
+ // If v is a block argument, it must be from the targetOp.
+ if (!op) {
+ assert(cast<BlockArgument>(v).getOwner()->getParentOp() == targetOp);
+ return;
+ }
+ // If the op is in the nonRecomputable set, add it to toCache and return.
+ if (nonRecomputable.contains(op)) {
+ toCache.insert(op);
+ return;
+ }
+ // Add the op to toRecompute.
+ toRecompute.insert(op);
+ for (auto opr : op->getOperands())
+ collectNonRecomputableDeps(opr, targetOp, nonRecomputable, toCache,
+ toRecompute);
+}
+
+/// createBlockArgsAndMap creates block arguments on the new target block and
+/// maps the original values to them.
+static void createBlockArgsAndMap(Location loc, RewriterBase &rewriter,
+ omp::TargetOp &targetOp, Block *targetBlock,
+ Block *newTargetBlock,
+ SmallVector<Value> &hostEvalVars,
+ SmallVector<Value> &mapOperands,
+ SmallVector<Value> &allocs,
+ IRMapping &irMapping) {
+ // FIRST: Map `host_eval_vars` to block arguments
+ unsigned originalHostEvalVarsSize = targetOp.getHostEvalVars().size();
+ for (unsigned i = 0; i < hostEvalVars.size(); ++i) {
+ Value originalValue;
+ BlockArgument newArg;
+ if (i < originalHostEvalVarsSize) {
+ originalValue = targetBlock->getArgument(i); // Host_eval args come first
+ newArg = newTargetBlock->addArgument(originalValue.getType(),
+ originalValue.getLoc());
+ } else {
+ originalValue = hostEvalVars[i];
+ newArg = newTargetBlock->addArgument(originalValue.getType(),
+ originalValue.getLoc());
+ }
+ irMapping.map(originalValue, newArg);
+ }
+
+ // SECOND: Map `map_operands` to block arguments
+ unsigned originalMapVarsSize = targetOp.getMapVars().size();
+ for (unsigned i = 0; i < mapOperands.size(); ++i) {
+ Value originalValue;
+ BlockArgument newArg;
+ // Map the new arguments from the original block.
+ if (i < originalMapVarsSize) {
+ originalValue = targetBlock->getArgument(originalHostEvalVarsSize +
+ i); // Offset by host_eval count
+ newArg = newTargetBlock->addArgument(originalValue.getType(),
+ originalValue.getLoc());
+ }
+ // Map the new arguments from the `allocs`.
+ else {
+ originalValue = allocs[i - originalMapVarsSize];
+ newArg = newTargetBlock->addArgument(
+ getPtrTypeForOmp(originalValue.getType()), originalValue.getLoc());
+ }
+ irMapping.map(originalValue, newArg);
+ }
+
+ // THIRD: Map `private_vars` to block arguments (if any)
+ unsigned originalPrivateVarsSize = targetOp.getPrivateVars().size();
+ for (unsigned i = 0; i < originalPrivateVarsSize; ++i) {
+ auto originalArg = targetBlock->getArgument(originalHostEvalVarsSize +
+ originalMapVarsSize + i);
+ auto newArg = newTargetBlock->addArgument(originalArg.getType(),
+ originalArg.getLoc());
+ irMapping.map(originalArg, newArg);
+ }
+ return;
+}
+
+/// reloadCacheAndRecompute reloads cached values and recomputes operations
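+///
+/// Schematically (illustrative only; types abbreviated), for each cached value
+/// the new target block starts with
+///
+///   %p = llvm.load %blockArg          // pointer-like values, followed by
+///   %v = fir.convert %p               // a convert back to the FIR type
+///   %s = fir.load %blockArg           // non-pointer values
+///
+/// followed by clones of the ops marked for recomputation.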
+static void reloadCacheAndRecompute(
+ Location loc, RewriterBase &rewriter, Operation *splitBefore,
+ omp::TargetOp &targetOp, Block *targetBlock, Block *newTargetBlock,
+ SmallVector<Value> &hostEvalVars, SmallVector<Value> &mapOperands,
+ SmallVector<Value> &allocs, SetVector<Operation *> &toRecompute,
+ IRMapping &irMapping) {
+ // Handle the load operations for the allocs.
+ rewriter.setInsertionPointToStart(newTargetBlock);
+ auto llvmPtrTy = LLVM::LLVMPointerType::get(targetOp.getContext());
+
+ unsigned originalMapVarsSize = targetOp.getMapVars().size();
+ unsigned hostEvalVarsSize = hostEvalVars.size();
+ // Create load operations for each allocated variable.
+ for (unsigned i = 0; i < allocs.size(); ++i) {
+ Value original = allocs[i];
+ // Get the new block argument for this specific allocated value.
+ Value newArg =
+ newTargetBlock->getArgument(hostEvalVarsSize + originalMapVarsSize + i);
+ Value restored;
+ // If the original value is a pointer or reference, load and convert if
+ // necessary.
+ if (isPtr(original.getType())) {
+ restored = rewriter.create<LLVM::LoadOp>(loc, llvmPtrTy, newArg);
+ if (!isa<LLVM::LLVMPointerType>(original.getType()))
+ restored =
+ rewriter.create<fir::ConvertOp>(loc, original.getType(), restored);
+ } else {
+ restored = rewriter.create<fir::LoadOp>(loc, newArg);
+ }
+ irMapping.map(original, restored);
+ }
+ // Clone the operations if they are in the toRecompute set.
+ for (auto it = targetBlock->begin(); it != splitBefore->getIterator(); it++) {
+ if (toRecompute.contains(&*it))
+ rewriter.clone(*it, irMapping);
+ }
+}
+
+/// Given a teamsOp, navigate down the nested structure to find the
+/// innermost LoopNestOp. The expected nesting is:
+/// teams -> parallel -> distribute -> wsloop -> loop_nest
+static mlir::omp::LoopNestOp getLoopNestFromTeams(mlir::omp::TeamsOp teamsOp) {
+ if (teamsOp.getRegion().empty())
+ return nullptr;
+ // Ensure the teams region has a single block.
+ if (teamsOp.getRegion().getBlocks().size() != 1)
+ return nullptr;
+ // Find parallel op inside teams
+ mlir::omp::ParallelOp parallelOp = nullptr;
+ // Look for the parallel op in the teams region
+ for (auto &op : teamsOp.getRegion().front()) {
+ if (auto parallel = dyn_cast<mlir::omp::ParallelOp>(op)) {
+ parallelOp = parallel;
+ break;
+ }
+ }
+ if (!parallelOp)
+ return nullptr;
+
+ // Find distribute op inside parallel
+ mlir::omp::DistributeOp distributeOp = nullptr;
+ for (auto &op : parallelOp.getRegion().front()) {
+ if (auto distribute = dyn_cast<mlir::omp::DistributeOp>(op)) {
+ distributeOp = distribute;
+ break;
+ }
+ }
+ if (!distributeOp)
+ return nullptr;
+
+ // Find wsloop op inside distribute
+ mlir::omp::WsloopOp wsloopOp = nullptr;
+ for (auto &op : distributeOp.getRegion().front()) {
+ if (auto wsloop = dyn_cast<mlir::omp::WsloopOp>(op)) {
+ wsloopOp = wsloop;
+ break;
+ }
+ }
+ if (!wsloopOp)
+ return nullptr;
+
+ // Find loop_nest op inside wsloop
+ for (auto &op : wsloopOp.getRegion().front()) {
+ if (auto loopNest = dyn_cast<mlir::omp::LoopNestOp>(op)) {
+ return loopNest;
+ }
+ }
+
+ return nullptr;
+}
+
+/// Generate LLVM constant operations for i32 and i64 types.
+static mlir::LLVM::ConstantOp
+genI32Constant(mlir::Location loc, mlir::RewriterBase &rewriter, int value) {
+ mlir::Type i32Ty = rewriter.getI32Type();
+ mlir::IntegerAttr attr = rewriter.getI32IntegerAttr(value);
+ return rewriter.create<mlir::LLVM::ConstantOp>(loc, i32Ty, attr);
+}
+
+/// Given a box descriptor, extract the base address of the data it describes.
+/// If the box descriptor is a reference, load it first.
+/// The base address is returned as an i8* pointer.
+static Value genDescriptorGetBaseAddress(fir::FirOpBuilder &builder,
+ Location loc, Value boxDesc) {
+ Value box = boxDesc;
+ if (auto refBox = dyn_cast<fir::ReferenceType>(boxDesc.getType())) {
+ box = fir::LoadOp::create(builder, loc, boxDesc);
+ }
+ assert(isa<fir::BoxType>(box.getType()) &&
+ "Unknown type passed to genDescriptorGetBaseAddress");
+ auto i8Type = builder.getI8Type();
+ auto unknownArrayType =
+ fir::SequenceType::get({fir::SequenceType::getUnknownExtent()}, i8Type);
+ auto i8BoxType = fir::BoxType::get(unknownArrayType);
+ auto typedBox = fir::ConvertOp::create(builder, loc, i8BoxType, box);
+ auto rawAddr = fir::BoxAddrOp::create(builder, loc, typedBox);
+ return rawAddr;
+}
+
+/// Given a box descriptor, extract the total number of elements in the array it
+/// describes. If the box descriptor is a reference, load it first.
+/// The total number of elements is returned as an i64 value.
+static Value genDescriptorGetTotalElements(fir::FirOpBuilder &builder,
+ Location loc, Value boxDesc) {
+ Value box = boxDesc;
+ if (auto refBox = dyn_cast<fir::ReferenceType>(boxDesc.getType())) {
+ box = fir::LoadOp::create(builder, loc, boxDesc);
+ }
+ assert(isa<fir::BoxType>(box.getType()) &&
+ "Unknown type passed to genDescriptorGetTotalElements");
+ auto i64Type = builder.getI64Type();
+ return fir::BoxTotalElementsOp::create(builder, loc, i64Type, box);
+}
+
+/// Given a box descriptor, extract the size of each element in the array it
+/// describes. If the box descriptor is a reference, load it first.
+/// The element size is returned as an i64 value.
+static Value genDescriptorGetEleSize(fir::FirOpBuilder &builder, Location loc,
+ Value boxDesc) {
+ Value box = boxDesc;
+ if (auto refBox = dyn_cast<fir::ReferenceType>(boxDesc.getType())) {
+ box = fir::LoadOp::create(builder, loc, boxDesc);
+ }
+ assert(isa<fir::BoxType>(box.getType()) &&
+ "Unknown type passed to genDescriptorGetElementSize");
+ auto i64Type = builder.getI64Type();
+ return fir::BoxEleSizeOp::create(builder, loc, i64Type, box);
+}
+
+/// Given a box descriptor, compute the total size in bytes of the data it
+/// describes. This is done by multiplying the total number of elements by the
+/// size of each element. If the box descriptor is a reference, load it first.
+/// The total size in bytes is returned as an i64 value.
+static Value genDescriptorGetDataSizeInBytes(fir::FirOpBuilder &builder,
+ Location loc, Value boxDesc) {
+ Value box = boxDesc;
+ if (auto refBox = dyn_cast<fir::ReferenceType>(boxDesc.getType())) {
+ box = fir::LoadOp::create(builder, loc, boxDesc);
+ }
+ assert(isa<fir::BoxType>(box.getType()) &&
+ "Unknown type passed to genDescriptorGetElementSize");
+ Value eleSize = genDescriptorGetEleSize(builder, loc, box);
+ Value totalElements = genDescriptorGetTotalElements(builder, loc, box);
+ return mlir::arith::MulIOp::create(builder, loc, totalElements, eleSize);
+}
+
+/// Generate a call to the OpenMP runtime function `omp_get_mapped_ptr` to
+/// retrieve the device pointer corresponding to a given host pointer and device
+/// number. If no mapping exists, the original host pointer is returned.
+/// Signature:
+/// void *omp_get_mapped_ptr(void *host_ptr, int device_num);
+static mlir::Value genOmpGetMappedPtrIfPresent(fir::FirOpBuilder &builder,
+ mlir::Location loc,
+ mlir::Value hostPtr,
+ mlir::Value deviceNum,
+ mlir::ModuleOp module) {
+ auto *context = builder.getContext();
+ auto voidPtrType = fir::LLVMPointerType::get(context, builder.getI8Type());
+ auto i32Type = builder.getI32Type();
+ auto funcName = "omp_get_mapped_ptr";
+ auto funcOp = module.lookupSymbol<mlir::func::FuncOp>(funcName);
+
+ if (!funcOp) {
+ auto funcType =
+ mlir::FunctionType::get(context, {voidPtrType, i32Type}, {voidPtrType});
+
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.setInsertionPointToStart(module.getBody());
+
+ funcOp = mlir::func::FuncOp::create(builder, loc, funcName, funcType);
+ funcOp.setPrivate();
+ }
+
+ llvm::SmallVector<mlir::Value> args;
+ args.push_back(fir::ConvertOp::create(builder, loc, voidPtrType, hostPtr));
+ args.push_back(fir::ConvertOp::create(builder, loc, i32Type, deviceNum));
+ auto callOp = fir::CallOp::create(builder, loc, funcOp, args);
+ auto mappedPtr = callOp.getResult(0);
+ auto isNull = builder.genIsNullAddr(loc, mappedPtr);
+ auto convertedHostPtr =
+ fir::ConvertOp::create(builder, loc, voidPtrType, hostPtr);
+ auto result = arith::SelectOp::create(builder, loc, isNull, convertedHostPtr,
+ mappedPtr);
+ return result;
+}
+
+/// Generate a call to the OpenMP runtime function `omp_target_memcpy` to
+/// perform memory copy between host and device or between devices.
+/// Signature:
+/// int omp_target_memcpy(void *dst, const void *src, size_t length,
+/// size_t dst_offset, size_t src_offset,
+/// int dst_device, int src_device);
+static void genOmpTargetMemcpyCall(fir::FirOpBuilder &builder,
+ mlir::Location loc, mlir::Value dst,
+ mlir::Value src, mlir::Value length,
+ mlir::Value dstOffset, mlir::Value srcOffset,
+ mlir::Value device, mlir::ModuleOp module) {
+ auto *context = builder.getContext();
+ auto funcName = "omp_target_memcpy";
+ auto voidPtrType = fir::LLVMPointerType::get(context, builder.getI8Type());
+ auto sizeTType = builder.getI64Type(); // assuming size_t is 64-bit
+ auto i32Type = builder.getI32Type();
+ auto funcOp = module.lookupSymbol<mlir::func::FuncOp>(funcName);
+
+ if (!funcOp) {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.setInsertionPointToStart(module.getBody());
+ llvm::SmallVector<mlir::Type> argTypes = {
+ voidPtrType, voidPtrType, sizeTType, sizeTType,
+ sizeTType, i32Type, i32Type};
+ auto funcType = mlir::FunctionType::get(context, argTypes, {i32Type});
+ funcOp = mlir::func::FuncOp::create(builder, loc, funcName, funcType);
+ funcOp.setPrivate();
+ }
+
+ llvm::SmallVector<mlir::Value> args{dst, src, length, dstOffset,
+ srcOffset, device, device};
+ fir::CallOp::create(builder, loc, funcOp, args);
+ return;
+}
+
+/// Generate code to replace a Fortran array assignment call with OpenMP
+/// runtime calls to perform the equivalent operation on the device.
+/// This involves extracting the source and destination pointers from the
+/// Fortran array descriptors, retrieving their mapped device pointers (if any),
+/// and invoking `omp_target_memcpy` to copy the data on the device.
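+///
+/// Schematically (illustrative only; conversions and types omitted), the call
+/// is replaced by
+///
+///   %src_base  = <base address of the source descriptor>
+///   %dest_base = <base address of the destination descriptor>
+///   %bytes     = <total size in bytes of the source data>
+///   %dst = fir.call @omp_get_mapped_ptr(%dest_base, %device)
+///   %src = fir.call @omp_get_mapped_ptr(%src_base, %device)
+///   fir.call @omp_target_memcpy(%dst, %src, %bytes, %c0, %c0, %device,
+///                               %device)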
+static void genFortranAssignOmpReplacement(fir::FirOpBuilder &builder,
+ mlir::Location loc,
+ fir::CallOp callOp,
+ mlir::Value device,
+ mlir::ModuleOp module) {
+ assert(callOp.getNumResults() == 0 &&
+ "Expected _FortranAAssign to have no results");
+ assert(callOp.getNumOperands() >= 2 &&
+ "Expected _FortranAAssign to have at least two operands");
+
+ // Extract the source and destination pointers from the call operands.
+ mlir::Value dest = callOp.getOperand(0);
+ mlir::Value src = callOp.getOperand(1);
+
+ // Get the base addresses of the source and destination arrays.
+ mlir::Value srcBase = genDescriptorGetBaseAddress(builder, loc, src);
+ mlir::Value destBase = genDescriptorGetBaseAddress(builder, loc, dest);
+
+ // Get the total size in bytes of the data to be copied.
+ mlir::Value srcDataSize = genDescriptorGetDataSizeInBytes(builder, loc, src);
+
+ // Retrieve the mapped device pointers for source and destination.
+ // If no mapping exists, the original host pointer is used.
+ Value destPtr =
+ genOmpGetMappedPtrIfPresent(builder, loc, destBase, device, module);
+ Value srcPtr =
+ genOmpGetMappedPtrIfPresent(builder, loc, srcBase, device, module);
+ Value zero = builder.create<LLVM::ConstantOp>(loc, builder.getI64Type(),
+ builder.getI64IntegerAttr(0));
+
+ // Generate the call to omp_target_memcpy to perform the data copy on the
+ // device.
+ genOmpTargetMemcpyCall(builder, loc, destPtr, srcPtr, srcDataSize, zero, zero,
+ device, module);
+}
+
+/// Struct to hold the host eval vars corresponding to loop bounds and steps
+struct HostEvalVars {
+ SmallVector<Value> lbs;
+ SmallVector<Value> ubs;
+ SmallVector<Value> steps;
+};
+
+/// moveToHost clones all the ops from the target region to outside of it.
+/// It hoists the "_FortranAAssign" runtime call and replaces it with the
+/// OpenMP version. It also hoists fir.allocmem and fir.freemem, replacing them
+/// with omp.target_allocmem and omp.target_freemem respectively.
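+///
+/// Schematically (illustrative only; types and attributes omitted):
+///
+///   %p = fir.allocmem T
+/// becomes
+///   %h = omp.target_allocmem %device, T
+///   %p = fir.convert %h
+///
+/// and
+///
+///   fir.freemem %p
+/// becomes
+///   omp.target_freemem %device, (fir.convert %p)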
+static LogicalResult moveToHost(omp::TargetOp targetOp, RewriterBase &rewriter,
+ mlir::ModuleOp module,
+ struct HostEvalVars &hostEvalVars) {
+ OpBuilder::InsertionGuard guard(rewriter);
+ Block *targetBlock = &targetOp.getRegion().front();
+ assert(targetBlock == &targetOp.getRegion().back());
+ IRMapping mapping;
+
+ // Get the parent target_data op
+ auto targetDataOp = dyn_cast<omp::TargetDataOp>(targetOp->getParentOp());
+ if (!targetDataOp) {
+ emitError(targetOp->getLoc(),
+ "Expected target op to be inside target_data op");
+ return failure();
+ }
+ // create mapping for host_eval_vars
+ unsigned hostEvalVarCount = targetOp.getHostEvalVars().size();
+ for (unsigned i = 0; i < targetOp.getHostEvalVars().size(); ++i) {
+ Value hostEvalVar = targetOp.getHostEvalVars()[i];
+ BlockArgument arg = targetBlock->getArguments()[i];
+ mapping.map(arg, hostEvalVar);
+ }
+ // create mapping for map_vars
+ for (unsigned i = 0; i < targetOp.getMapVars().size(); ++i) {
+ Value mapInfo = targetOp.getMapVars()[i];
+ BlockArgument arg = targetBlock->getArguments()[hostEvalVarCount + i];
+ Operation *op = mapInfo.getDefiningOp();
+ assert(op);
+ auto mapInfoOp = cast<omp::MapInfoOp>(op);
+ // map the block argument to the host-side variable pointer
+ mapping.map(arg, mapInfoOp.getVarPtr());
+ }
+ // create mapping for private_vars
+ unsigned mapSize = targetOp.getMapVars().size();
+ for (unsigned i = 0; i < targetOp.getPrivateVars().size(); ++i) {
+ Value privateVar = targetOp.getPrivateVars()[i];
+ // The mapping should link the device-side variable to the host-side one.
+ BlockArgument arg =
+ targetBlock->getArguments()[hostEvalVarCount + mapSize + i];
+ // Map the device-side copy (`arg`) to the host-side value (`privateVar`).
+ mapping.map(arg, privateVar);
+ }
+
+ rewriter.setInsertionPoint(targetOp);
+ SmallVector<Operation *> opsToReplace;
+ Value device = targetOp.getDevice();
+
+ // If device is not specified, default to device 0.
+ if (!device) {
+ device = genI32Constant(targetOp.getLoc(), rewriter, 0);
+ }
+ // Clone all operations.
+ for (auto it = targetBlock->begin(), end = std::prev(targetBlock->end());
+ it != end; ++it) {
+ auto *op = &*it;
+ Operation *clonedOp = rewriter.clone(*op, mapping);
+ // Map the results of the original op to the cloned op.
+ for (unsigned i = 0; i < op->getNumResults(); ++i) {
+ mapping.map(op->getResult(i), clonedOp->getResult(i));
+ }
+ // fir.declare changes its type when it is hoisted out of omp.target into
+ // omp.target_data. Introduce a load if the original declareOp input is not
+ // of reference type but the cloned declareOp input is of reference type.
+ if (fir::DeclareOp clonedDeclareOp = dyn_cast<fir::DeclareOp>(clonedOp)) {
+ auto originalDeclareOp = cast<fir::DeclareOp>(op);
+ Type originalInType = originalDeclareOp.getMemref().getType();
+ Type clonedInType = clonedDeclareOp.getMemref().getType();
+
+ fir::ReferenceType originalRefType =
+ dyn_cast<fir::ReferenceType>(originalInType);
+ fir::ReferenceType clonedRefType =
+ dyn_cast<fir::ReferenceType>(clonedInType);
+ if (!originalRefType && clonedRefType) {
+ Type clonedEleTy = clonedRefType.getElementType();
+ if (clonedEleTy == originalDeclareOp.getType()) {
+ opsToReplace.push_back(clonedOp);
+ }
+ }
+ }
+ // Collect the ops to be replaced.
+ if (isa<fir::AllocMemOp>(clonedOp) || isa<fir::FreeMemOp>(clonedOp))
+ opsToReplace.push_back(clonedOp);
+ // Check for runtime calls to be replaced.
+ if (isRuntimeCall(clonedOp)) {
+ fir::CallOp runtimeCall = cast<fir::CallOp>(op);
+ auto funcName = runtimeCall.getCallee()->getRootReference().getValue();
+ if (funcName == FortranAssignStr) {
+ opsToReplace.push_back(clonedOp);
+ } else {
+ emitError(runtimeCall->getLoc(), "Unhandled runtime call hoisting.");
+ return failure();
+ }
+ }
+ }
+ // Replace fir.allocmem with omp.target_allocmem.
+ for (Operation *op : opsToReplace) {
+ if (auto allocOp = dyn_cast<fir::AllocMemOp>(op)) {
+ rewriter.setInsertionPoint(allocOp);
+ auto ompAllocmemOp = rewriter.create<omp::TargetAllocMemOp>(
+ allocOp.getLoc(), rewriter.getI64Type(), device,
+ allocOp.getInTypeAttr(), allocOp.getUniqNameAttr(),
+ allocOp.getBindcNameAttr(), allocOp.getTypeparams(),
+ allocOp.getShape());
+ auto firConvertOp = rewriter.create<fir::ConvertOp>(
+ allocOp.getLoc(), allocOp.getResult().getType(),
+ ompAllocmemOp.getResult());
+ rewriter.replaceOp(allocOp, firConvertOp.getResult());
+ }
+ // Replace fir.freemem with omp.target_freemem.
+ else if (auto freeOp = dyn_cast<fir::FreeMemOp>(op)) {
+ rewriter.setInsertionPoint(freeOp);
+ auto firConvertOp = rewriter.create<fir::ConvertOp>(
+ freeOp.getLoc(), rewriter.getI64Type(), freeOp.getHeapref());
+ rewriter.create<omp::TargetFreeMemOp>(freeOp.getLoc(), device,
+ firConvertOp.getResult());
+ rewriter.eraseOp(freeOp);
+ }
+ // fir.declare changes its type when it is hoisted out of omp.target into
+ // omp.target_data. Introduce a load if the original declareOp input is not
+ // of reference type but the cloned declareOp input is of reference type.
+ else if (fir::DeclareOp clonedDeclareOp = dyn_cast<fir::DeclareOp>(op)) {
+ Type clonedInType = clonedDeclareOp.getMemref().getType();
+ fir::ReferenceType clonedRefType =
+ dyn_cast<fir::ReferenceType>(clonedInType);
+ Type clonedEleTy = clonedRefType.getElementType();
+ rewriter.setInsertionPoint(op);
+ Value loadedValue = rewriter.create<fir::LoadOp>(
+ clonedDeclareOp.getLoc(), clonedEleTy, clonedDeclareOp.getMemref());
+ clonedDeclareOp.getResult().replaceAllUsesWith(loadedValue);
+ }
+ // Replace runtime calls with omp versions.
+ else if (isRuntimeCall(op)) {
+ fir::CallOp runtimeCall = cast<fir::CallOp>(op);
+ auto funcName = runtimeCall.getCallee()->getRootReference().getValue();
+ if (funcName == FortranAssignStr) {
+ rewriter.setInsertionPoint(op);
+ fir::FirOpBuilder builder{rewriter, op};
+
+ mlir::Location loc = runtimeCall.getLoc();
+ genFortranAssignOmpReplacement(builder, loc, runtimeCall, device,
+ module);
+ rewriter.eraseOp(op);
+ } else {
+ emitError(runtimeCall->getLoc(), "Unhandled runtime call hoisting.");
+ return failure();
+ }
+ } else {
+ emitError(op->getLoc(), "Unhandled op hoisting.");
+ return failure();
+ }
+ }
+
+ // Update the host_eval_vars to use the mapped values.
+ for (size_t i = 0; i < hostEvalVars.lbs.size(); ++i) {
+ hostEvalVars.lbs[i] = mapping.lookup(hostEvalVars.lbs[i]);
+ hostEvalVars.ubs[i] = mapping.lookup(hostEvalVars.ubs[i]);
+ hostEvalVars.steps[i] = mapping.lookup(hostEvalVars.steps[i]);
+ }
+ // Finally erase the original targetOp.
+ rewriter.eraseOp(targetOp);
+ return success();
+}
+
+/// Result of isolateOp method
+struct SplitResult {
+ omp::TargetOp preTargetOp;
+ omp::TargetOp isolatedTargetOp;
+ omp::TargetOp postTargetOp;
+};
+
+/// computeAllocsCacheRecomputable method computes the allocs needed to cache
+/// the values that are used outside the split point. It also computes the ops
+/// that need to be cached and the ops that can be recomputed after the split.
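+///
+/// For example (schematic, illustrative names), given a region split before
+/// the teams op:
+///
+///   %m = fir.allocmem ...        // has side effects -> cached via a temp map
+///   %d = fir.declare %m ...      // side-effect free  -> recomputed after split
+///   omp.teams { ... uses %d ... }   <- split point
+///
+/// %m is stored to a temporary mapped variable in the pre-split region and
+/// reloaded afterwards, while %d is simply re-cloned in the later regions.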
+static void computeAllocsCacheRecomputable(
+ omp::TargetOp targetOp, Operation *splitBeforeOp, RewriterBase &rewriter,
+ SmallVector<Value> &preMapOperands, SmallVector<Value> &postMapOperands,
+ SmallVector<Value> &allocs, SmallVector<Value> &requiredVals,
+ SetVector<Operation *> &nonRecomputable, SetVector<Operation *> &toCache,
+ SetVector<Operation *> &toRecompute) {
+ auto *targetBlock = &targetOp.getRegion().front();
+ // Find all values that are used outside the split point.
+ for (auto it = targetBlock->begin(); it != splitBeforeOp->getIterator();
+ it++) {
+ // Check if any of the results are used outside the split point.
+ for (auto res : it->getResults()) {
+ if (usedOutsideSplit(res, splitBeforeOp)) {
+ requiredVals.push_back(res);
+ }
+ }
+ // If the op is not recomputable, add it to the nonRecomputable set.
+ if (!isRecomputableAfterFission(&*it, splitBeforeOp)) {
+ nonRecomputable.insert(&*it);
+ }
+ }
+ // For each required value, collect its dependencies.
+ for (auto requiredVal : requiredVals)
+ collectNonRecomputableDeps(requiredVal, targetOp, nonRecomputable, toCache,
+ toRecompute);
+ // For each op in toCache, create an alloc and update the pre and post map
+ // operands.
+ for (Operation *op : toCache) {
+ for (auto res : op->getResults()) {
+ auto alloc =
+ allocateTempOmpVar(targetOp.getLoc(), res.getType(), rewriter);
+ allocs.push_back(res);
+ preMapOperands.push_back(alloc.from);
+ postMapOperands.push_back(alloc.to);
+ }
+ }
+}
+
+/// genPreTargetOp generates the preTargetOp that contains all the ops
+/// before the split point, creates the block arguments and maps the values
+/// accordingly, and creates the store operations for the allocs.
+static omp::TargetOp
+genPreTargetOp(omp::TargetOp targetOp, SmallVector<Value> &preMapOperands,
+ SmallVector<Value> &allocs, Operation *splitBeforeOp,
+ RewriterBase &rewriter, struct HostEvalVars &hostEvalVars,
+ bool isTargetDevice) {
+ auto loc = targetOp.getLoc();
+ auto *targetBlock = &targetOp.getRegion().front();
+ SmallVector<Value> preHostEvalVars{targetOp.getHostEvalVars()};
+ // update the hostEvalVars of preTargetOp
+ omp::TargetOp preTargetOp = rewriter.create<omp::TargetOp>(
+ targetOp.getLoc(), targetOp.getAllocateVars(),
+ targetOp.getAllocatorVars(), targetOp.getBareAttr(),
+ targetOp.getDependKindsAttr(), targetOp.getDependVars(),
+ targetOp.getDevice(), targetOp.getHasDeviceAddrVars(), preHostEvalVars,
+ targetOp.getIfExpr(), targetOp.getInReductionVars(),
+ targetOp.getInReductionByrefAttr(), targetOp.getInReductionSymsAttr(),
+ targetOp.getIsDevicePtrVars(), preMapOperands, targetOp.getNowaitAttr(),
+ targetOp.getPrivateVars(), targetOp.getPrivateSymsAttr(),
+ targetOp.getPrivateNeedsBarrierAttr(), targetOp.getThreadLimit(),
+ targetOp.getPrivateMapsAttr());
+ auto *preTargetBlock = rewriter.createBlock(
+ &preTargetOp.getRegion(), preTargetOp.getRegion().begin(), {}, {});
+ IRMapping preMapping;
+ // Create block arguments and map the values.
+ createBlockArgsAndMap(loc, rewriter, targetOp, targetBlock, preTargetBlock,
+ preHostEvalVars, preMapOperands, allocs, preMapping);
+
+ // Handle the store operations for the allocs.
+ rewriter.setInsertionPointToStart(preTargetBlock);
+ auto llvmPtrTy = LLVM::LLVMPointerType::get(targetOp.getContext());
+
+ // Clone the original operations.
+ for (auto it = targetBlock->begin(); it != splitBeforeOp->getIterator();
+ it++) {
+ rewriter.clone(*it, preMapping);
+ }
+
+ unsigned originalHostEvalVarsSize = preHostEvalVars.size();
+ unsigned originalMapVarsSize = targetOp.getMapVars().size();
+ // Create Stores for allocs.
+ for (unsigned i = 0; i < allocs.size(); ++i) {
+ Value originalResult = allocs[i];
+ Value toStore = preMapping.lookup(originalResult);
+ // Get the new block argument for this specific allocated value.
+ Value newArg = preTargetBlock->getArgument(originalHostEvalVarsSize +
+ originalMapVarsSize + i);
+ // Create the store operation.
+ if (isPtr(originalResult.getType())) {
+ if (!isa<LLVM::LLVMPointerType>(toStore.getType()))
+ toStore = rewriter.create<fir::ConvertOp>(loc, llvmPtrTy, toStore);
+ rewriter.create<LLVM::StoreOp>(loc, toStore, newArg);
+ } else {
+ rewriter.create<fir::StoreOp>(loc, toStore, newArg);
+ }
+ }
+ rewriter.create<omp::TerminatorOp>(loc);
+
+ // Update hostEvalVars with the mapped values for the loop bounds if we have
+ // a loopNestOp and we are not generating code for the target device.
+ omp::LoopNestOp loopNestOp =
+ getLoopNestFromTeams(cast<omp::TeamsOp>(splitBeforeOp));
+ if (loopNestOp && !isTargetDevice) {
+ for (size_t i = 0; i < loopNestOp.getLoopLowerBounds().size(); ++i) {
+ Value lb = loopNestOp.getLoopLowerBounds()[i];
+ Value ub = loopNestOp.getLoopUpperBounds()[i];
+ Value step = loopNestOp.getLoopSteps()[i];
+
+ hostEvalVars.lbs.push_back(preMapping.lookup(lb));
+ hostEvalVars.ubs.push_back(preMapping.lookup(ub));
+ hostEvalVars.steps.push_back(preMapping.lookup(step));
+ }
+ }
+
+ return preTargetOp;
+}
+
+/// genIsolatedTargetOp generates the isolatedTargetOp that contains the op at
+/// the split point. It creates the block arguments and maps the values
+/// accordingly, and it creates the load operations for the allocs and
+/// recomputes the necessary ops.
+static omp::TargetOp
+genIsolatedTargetOp(omp::TargetOp targetOp, SmallVector<Value> &postMapOperands,
+ Operation *splitBeforeOp, RewriterBase &rewriter,
+ SmallVector<Value> &allocs,
+ SetVector<Operation *> &toRecompute,
+ struct HostEvalVars &hostEvalVars, bool isTargetDevice) {
+ auto loc = targetOp.getLoc();
+ auto *targetBlock = &targetOp.getRegion().front();
+ SmallVector<Value> isolatedHostEvalVars{targetOp.getHostEvalVars()};
+ // update the hostEvalVars of isolatedTargetOp
+ if (!hostEvalVars.lbs.empty() && !isTargetDevice) {
+ isolatedHostEvalVars.append(hostEvalVars.lbs.begin(),
+ hostEvalVars.lbs.end());
+ isolatedHostEvalVars.append(hostEvalVars.ubs.begin(),
+ hostEvalVars.ubs.end());
+ isolatedHostEvalVars.append(hostEvalVars.steps.begin(),
+ hostEvalVars.steps.end());
+ }
+ // Create the isolated target op
+ omp::TargetOp isolatedTargetOp = rewriter.create<omp::TargetOp>(
+ targetOp.getLoc(), targetOp.getAllocateVars(),
+ targetOp.getAllocatorVars(), targetOp.getBareAttr(),
+ targetOp.getDependKindsAttr(), targetOp.getDependVars(),
+ targetOp.getDevice(), targetOp.getHasDeviceAddrVars(),
+ isolatedHostEvalVars, targetOp.getIfExpr(), targetOp.getInReductionVars(),
+ targetOp.getInReductionByrefAttr(), targetOp.getInReductionSymsAttr(),
+ targetOp.getIsDevicePtrVars(), postMapOperands, targetOp.getNowaitAttr(),
+ targetOp.getPrivateVars(), targetOp.getPrivateSymsAttr(),
+ targetOp.getPrivateNeedsBarrierAttr(), targetOp.getThreadLimit(),
+ targetOp.getPrivateMapsAttr());
+ auto *isolatedTargetBlock =
+ rewriter.createBlock(&isolatedTargetOp.getRegion(),
+ isolatedTargetOp.getRegion().begin(), {}, {});
+ IRMapping isolatedMapping;
+ // Create block arguments and map the values.
+ createBlockArgsAndMap(loc, rewriter, targetOp, targetBlock,
+ isolatedTargetBlock, isolatedHostEvalVars,
+ postMapOperands, allocs, isolatedMapping);
+ // Handle the load operations for the allocs and recompute ops.
+ reloadCacheAndRecompute(loc, rewriter, splitBeforeOp, targetOp, targetBlock,
+ isolatedTargetBlock, isolatedHostEvalVars,
+ postMapOperands, allocs, toRecompute,
+ isolatedMapping);
+
+ // Clone the original operations.
+ rewriter.clone(*splitBeforeOp, isolatedMapping);
+ rewriter.create<omp::TerminatorOp>(loc);
+
+ // update the loop bounds in the isolatedTargetOp if we have host_eval vars
+ // and we are not generating code for the target device.
+ if (!hostEvalVars.lbs.empty() && !isTargetDevice) {
+ omp::TeamsOp teamsOp;
+ for (auto &op : *isolatedTargetBlock) {
+ if (isa<omp::TeamsOp>(&op))
+ teamsOp = cast<omp::TeamsOp>(&op);
+ }
+ assert(teamsOp && "No teamsOp found in isolated target region");
+ // Get the loopNestOp inside the teamsOp
+ auto loopNestOp = getLoopNestFromTeams(teamsOp);
+ // Get the BlockArgs related to host_eval vars and update loop_nest bounds
+ // to them
+ unsigned originalHostEvalVarsSize = targetOp.getHostEvalVars().size();
+ unsigned index = originalHostEvalVarsSize;
+ // Replace loop bounds with the block arguments passed down via host_eval
+ SmallVector<Value> lbs, ubs, steps;
+
+ // Collect new lb/ub/step values from target block args
+ for (size_t i = 0; i < hostEvalVars.lbs.size(); ++i)
+ lbs.push_back(isolatedTargetBlock->getArgument(index++));
+
+ for (size_t i = 0; i < hostEvalVars.ubs.size(); ++i)
+ ubs.push_back(isolatedTargetBlock->getArgument(index++));
+
+ for (size_t i = 0; i < hostEvalVars.steps.size(); ++i)
+ steps.push_back(isolatedTargetBlock->getArgument(index++));
+
+ // Reset the loop bounds
+ loopNestOp.getLoopLowerBoundsMutable().assign(lbs);
+ loopNestOp.getLoopUpperBoundsMutable().assign(ubs);
+ loopNestOp.getLoopStepsMutable().assign(steps);
+ }
+
+ return isolatedTargetOp;
+}
+
+/// genPostTargetOp generates the postTargetOp that contains all the ops
+/// after the split point, creates the block arguments and maps the values
+/// accordingly, and creates the load operations for the allocs and recomputes
+/// the necessary ops.
+static omp::TargetOp genPostTargetOp(omp::TargetOp targetOp,
+ Operation *splitBeforeOp,
+ SmallVector<Value> &postMapOperands,
+ RewriterBase &rewriter,
+ SmallVector<Value> &allocs,
+ SetVector<Operation *> &toRecompute) {
+ auto loc = targetOp.getLoc();
+ auto *targetBlock = &targetOp.getRegion().front();
+ SmallVector<Value> postHostEvalVars{targetOp.getHostEvalVars()};
+ // Create the post target op
+ omp::TargetOp postTargetOp = rewriter.create<omp::TargetOp>(
+ targetOp.getLoc(), targetOp.getAllocateVars(),
+ targetOp.getAllocatorVars(), targetOp.getBareAttr(),
+ targetOp.getDependKindsAttr(), targetOp.getDependVars(),
+ targetOp.getDevice(), targetOp.getHasDeviceAddrVars(), postHostEvalVars,
+ targetOp.getIfExpr(), targetOp.getInReductionVars(),
+ targetOp.getInReductionByrefAttr(), targetOp.getInReductionSymsAttr(),
+ targetOp.getIsDevicePtrVars(), postMapOperands, targetOp.getNowaitAttr(),
+ targetOp.getPrivateVars(), targetOp.getPrivateSymsAttr(),
+ targetOp.getPrivateNeedsBarrierAttr(), targetOp.getThreadLimit(),
+ targetOp.getPrivateMapsAttr());
+ // Create the block for postTargetOp
+ auto *postTargetBlock = rewriter.createBlock(
+ &postTargetOp.getRegion(), postTargetOp.getRegion().begin(), {}, {});
+ IRMapping postMapping;
+ // Create block arguments and map the values.
+ createBlockArgsAndMap(loc, rewriter, targetOp, targetBlock, postTargetBlock,
+ postHostEvalVars, postMapOperands, allocs, postMapping);
+ // Handle the load operations for the allocs and recompute ops.
+ reloadCacheAndRecompute(loc, rewriter, splitBeforeOp, targetOp, targetBlock,
+ postTargetBlock, postHostEvalVars, postMapOperands,
+ allocs, toRecompute, postMapping);
+ assert(splitBeforeOp->getNumResults() == 0 ||
+ llvm::all_of(splitBeforeOp->getResults(),
+ [](Value result) { return result.use_empty(); }));
+ // Clone the original operations after the split point.
+ for (auto it = std::next(splitBeforeOp->getIterator());
+ it != targetBlock->end(); it++)
+ rewriter.clone(*it, postMapping);
+ return postTargetOp;
+}
+
+/// isolateOp rewrites an omp.target_data { omp.target } nest into
+/// omp.target_data {
+/// // preTargetOp region contains ops before splitBeforeOp.
+/// omp.target {}
+/// // isolatedTargetOp region contains splitBeforeOp,
+/// omp.target {}
+/// // postTargetOp region contains ops after splitBeforeOp.
+/// omp.target {}
+/// }
+/// It also handles the mapping of variables and the caching/recomputing
+/// of values as needed.
+static FailureOr<SplitResult> isolateOp(Operation *splitBeforeOp,
+ bool splitAfter, RewriterBase &rewriter,
+ mlir::ModuleOp module,
+ bool isTargetDevice) {
+ auto targetOp = cast<omp::TargetOp>(splitBeforeOp->getParentOp());
+ assert(targetOp);
+ rewriter.setInsertionPoint(targetOp);
+
+ // Prepare the map operands for preTargetOp and postTargetOp
+ auto preMapOperands = SmallVector<Value>(targetOp.getMapVars());
+ auto postMapOperands = SmallVector<Value>(targetOp.getMapVars());
+
+ // Vectors to hold analysis results
+ SmallVector<Value> requiredVals;
+ SetVector<Operation *> toCache;
+ SetVector<Operation *> toRecompute;
+ SetVector<Operation *> nonRecomputable;
+ SmallVector<Value> allocs;
+ struct HostEvalVars hostEvalVars;
+
+ // Analyze the ops in target region to determine which ops need to be
+ // cached and which ops need to be recomputed
+ computeAllocsCacheRecomputable(
+ targetOp, splitBeforeOp, rewriter, preMapOperands, postMapOperands,
+ allocs, requiredVals, nonRecomputable, toCache, toRecompute);
+
+ rewriter.setInsertionPoint(targetOp);
+
+ // Generate the preTargetOp that contains all the ops before splitBeforeOp.
+ auto preTargetOp =
+ genPreTargetOp(targetOp, preMapOperands, allocs, splitBeforeOp, rewriter,
+ hostEvalVars, isTargetDevice);
+
+ // Move the ops of preTarget to host.
+ auto res = moveToHost(preTargetOp, rewriter, module, hostEvalVars);
+ if (failed(res))
+ return failure();
+ rewriter.setInsertionPoint(targetOp);
+
+ // Generate the isolatedTargetOp
+ omp::TargetOp isolatedTargetOp =
+ genIsolatedTargetOp(targetOp, postMapOperands, splitBeforeOp, rewriter,
+ allocs, toRecompute, hostEvalVars, isTargetDevice);
+
+ omp::TargetOp postTargetOp = nullptr;
+ // Generate the postTargetOp that contains all the ops after splitBeforeOp.
+ if (splitAfter) {
+ rewriter.setInsertionPoint(targetOp);
+ postTargetOp = genPostTargetOp(targetOp, splitBeforeOp, postMapOperands,
+ rewriter, allocs, toRecompute);
+ }
+ // Finally erase the original targetOp.
+ rewriter.eraseOp(targetOp);
+ return SplitResult{preTargetOp, isolatedTargetOp, postTargetOp};
+}
+
+/// Recursively fission target ops until no more nested ops can be isolated.
+static LogicalResult fissionTarget(omp::TargetOp targetOp,
+ RewriterBase &rewriter,
+ mlir::ModuleOp module, bool isTargetDevice) {
+ auto tuple = getNestedOpToIsolate(targetOp);
+ if (!tuple) {
+ LLVM_DEBUG(llvm::dbgs() << " No op to isolate\n");
+ struct HostEvalVars hostEvalVars;
+ return moveToHost(targetOp, rewriter, module, hostEvalVars);
+ }
+ Operation *toIsolate = std::get<0>(*tuple);
+ bool splitBefore = !std::get<1>(*tuple);
+ bool splitAfter = !std::get<2>(*tuple);
+ // Recursively isolate the target op.
+ if (splitBefore && splitAfter) {
+ auto res =
+ isolateOp(toIsolate, splitAfter, rewriter, module, isTargetDevice);
+ if (failed(res))
+ return failure();
+ return fissionTarget((*res).postTargetOp, rewriter, module, isTargetDevice);
+ }
+ // Isolate only before the op.
+ if (splitBefore) {
+ auto res =
+ isolateOp(toIsolate, splitAfter, rewriter, module, isTargetDevice);
+ if (failed(res))
+ return failure();
+ } else {
+ emitError(toIsolate->getLoc(), "Unhandled case in fissionTarget");
+ return failure();
+ }
+ return success();
+}
+
+/// Pass to lower omp.workdistribute ops.
+class LowerWorkdistributePass
+ : public flangomp::impl::LowerWorkdistributeBase<LowerWorkdistributePass> {
+public:
+ void runOnOperation() override {
+ MLIRContext &context = getContext();
+ auto moduleOp = getOperation();
+ bool changed = false;
+ SetVector<omp::TargetOp> targetOpsToProcess;
+ auto verify =
+ moduleOp->walk([&](mlir::omp::WorkdistributeOp workdistribute) {
+ if (failed(verifyTargetTeamsWorkdistribute(workdistribute)))
+ return WalkResult::interrupt();
+ return WalkResult::advance();
+ });
+ if (verify.wasInterrupted())
+ return signalPassFailure();
+
+ auto fission =
+ moduleOp->walk([&](mlir::omp::WorkdistributeOp workdistribute) {
+ auto res = fissionWorkdistribute(workdistribute);
+ if (failed(res))
+ return WalkResult::interrupt();
+ changed |= *res;
+ return WalkResult::advance();
+ });
+ if (fission.wasInterrupted())
+ return signalPassFailure();
+
+ auto rtCallLower =
+ moduleOp->walk([&](mlir::omp::WorkdistributeOp workdistribute) {
+ auto res = workdistributeRuntimeCallLower(workdistribute,
+ targetOpsToProcess);
+ if (failed(res))
+ return WalkResult::interrupt();
+ changed |= *res;
+ return WalkResult::advance();
+ });
+ if (rtCallLower.wasInterrupted())
+ return signalPassFailure();
+
+ moduleOp->walk([&](mlir::omp::WorkdistributeOp workdistribute) {
+ changed |= workdistributeDoLower(workdistribute, targetOpsToProcess);
+ });
+
+ moduleOp->walk([&](mlir::omp::TeamsOp teams) {
+ changed |= teamsWorkdistributeToSingleOp(teams, targetOpsToProcess);
+ });
+ if (changed) {
+ bool isTargetDevice =
+ llvm::cast<mlir::omp::OffloadModuleInterface>(*moduleOp)
+ .getIsTargetDevice();
+ IRRewriter rewriter(&context);
+ for (auto targetOp : targetOpsToProcess) {
+ auto res = splitTargetData(targetOp, rewriter);
+ if (failed(res))
+ return signalPassFailure();
+ if (*res) {
+ if (failed(fissionTarget(*res, rewriter, moduleOp, isTargetDevice)))
+ return signalPassFailure();
+ }
+ }
+ }
+ }
+};
+} // namespace
diff --git a/flang/lib/Optimizer/Passes/Pipelines.cpp b/flang/lib/Optimizer/Passes/Pipelines.cpp
index a83b066..1ecb6d3 100644
--- a/flang/lib/Optimizer/Passes/Pipelines.cpp
+++ b/flang/lib/Optimizer/Passes/Pipelines.cpp
@@ -301,8 +301,10 @@ void createHLFIRToFIRPassPipeline(mlir::PassManager &pm,
addNestedPassToAllTopLevelOperations<PassConstructor>(
pm, hlfir::createInlineHLFIRAssign);
pm.addPass(hlfir::createConvertHLFIRtoFIR());
- if (enableOpenMP != EnableOpenMP::None)
+ if (enableOpenMP != EnableOpenMP::None) {
pm.addPass(flangomp::createLowerWorkshare());
+ pm.addPass(flangomp::createLowerWorkdistribute());
+ }
if (enableOpenMP == EnableOpenMP::Simd)
pm.addPass(flangomp::createSimdOnlyPass());
}
diff --git a/flang/lib/Optimizer/Transforms/AffinePromotion.cpp b/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
index 061a7d2..bdc3418 100644
--- a/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
+++ b/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
@@ -474,7 +474,7 @@ public:
mlir::PatternRewriter &rewriter) const override {
LLVM_DEBUG(llvm::dbgs() << "AffineLoopConversion: rewriting loop:\n";
loop.dump(););
- LLVM_ATTRIBUTE_UNUSED auto loopAnalysis =
+ [[maybe_unused]] auto loopAnalysis =
functionAnalysis.getChildLoopAnalysis(loop);
if (!loopAnalysis.canPromoteToAffine())
return rewriter.notifyMatchFailure(loop, "cannot promote to affine");
diff --git a/flang/lib/Optimizer/Transforms/StackArrays.cpp b/flang/lib/Optimizer/Transforms/StackArrays.cpp
index 80b3f68..8601499 100644
--- a/flang/lib/Optimizer/Transforms/StackArrays.cpp
+++ b/flang/lib/Optimizer/Transforms/StackArrays.cpp
@@ -561,7 +561,7 @@ static mlir::Value convertAllocationType(mlir::PatternRewriter &rewriter,
return stack;
fir::HeapType firHeapTy = mlir::cast<fir::HeapType>(heapTy);
- LLVM_ATTRIBUTE_UNUSED fir::ReferenceType firRefTy =
+ [[maybe_unused]] fir::ReferenceType firRefTy =
mlir::cast<fir::ReferenceType>(stackTy);
assert(firHeapTy.getElementType() == firRefTy.getElementType() &&
"Allocations must have the same type");
diff --git a/flang/test/Fir/OpenACC/pointer-like-interface-alloc.mlir b/flang/test/Fir/OpenACC/pointer-like-interface-alloc.mlir
new file mode 100644
index 0000000..0da360a
--- /dev/null
+++ b/flang/test/Fir/OpenACC/pointer-like-interface-alloc.mlir
@@ -0,0 +1,122 @@
+// RUN: fir-opt %s --split-input-file --pass-pipeline="builtin.module(func.func(test-acc-pointer-like-interface{test-mode=alloc}))" 2>&1 | FileCheck %s
+
+// The tests here use a synthetic hlfir.declare in order to ensure that the hlfir dialect is
+// loaded. This is required because the pass used is part of OpenACC test passes outside of
+// flang, and the APIs being tested may generate hlfir operations even when none appear in the input.
+
+func.func @test_ref_scalar_alloc() {
+ %0 = fir.alloca f32 {test.ptr}
+ %1:2 = hlfir.declare %0 {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Successfully generated alloc for operation: %{{.*}} = fir.alloca f32 {test.ptr}
+ // CHECK: Generated: %{{.*}} = fir.alloca f32
+ return
+}
+
+// -----
+
+func.func @test_ref_static_array_alloc() {
+ %0 = fir.alloca !fir.array<10x20xf32> {test.ptr}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Successfully generated alloc for operation: %{{.*}} = fir.alloca !fir.array<10x20xf32> {test.ptr}
+ // CHECK: Generated: %{{.*}} = fir.alloca !fir.array<10x20xf32>
+ return
+}
+
+// -----
+
+func.func @test_ref_derived_type_alloc() {
+ %0 = fir.alloca !fir.type<_QTt{i:i32}> {test.ptr}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Successfully generated alloc for operation: %{{.*}} = fir.alloca !fir.type<_QTt{i:i32}> {test.ptr}
+ // CHECK: Generated: %{{.*}} = fir.alloca !fir.type<_QTt{i:i32}>
+ return
+}
+
+// -----
+
+func.func @test_heap_scalar_alloc() {
+ %0 = fir.allocmem f32 {test.ptr}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Successfully generated alloc for operation: %{{.*}} = fir.allocmem f32 {test.ptr}
+ // CHECK: Generated: %{{.*}} = fir.allocmem f32
+ return
+}
+
+// -----
+
+func.func @test_heap_static_array_alloc() {
+ %0 = fir.allocmem !fir.array<10x20xf32> {test.ptr}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Successfully generated alloc for operation: %{{.*}} = fir.allocmem !fir.array<10x20xf32> {test.ptr}
+ // CHECK: Generated: %{{.*}} = fir.allocmem !fir.array<10x20xf32>
+ return
+}
+
+// -----
+
+func.func @test_ptr_scalar_alloc() {
+ %0 = fir.alloca f32
+ %1 = fir.convert %0 {test.ptr} : (!fir.ref<f32>) -> !fir.ptr<f32>
+ %2:2 = hlfir.declare %0 {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Successfully generated alloc for operation
+ // CHECK: Generated: %{{.*}} = fir.alloca f32
+ // CHECK: Generated: %{{.*}} = fir.convert %{{.*}} : (!fir.ref<f32>) -> !fir.ptr<f32>
+ return
+}
+
+// -----
+
+func.func @test_llvm_ptr_scalar_alloc() {
+ %0 = fir.alloca f32
+ %1 = fir.convert %0 {test.ptr} : (!fir.ref<f32>) -> !fir.llvm_ptr<f32>
+ %2:2 = hlfir.declare %0 {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Successfully generated alloc for operation
+ // CHECK: Generated: %{{.*}} = fir.alloca f32
+ // CHECK: Generated: %{{.*}} = fir.convert %{{.*}} : (!fir.ref<f32>) -> !fir.llvm_ptr<f32>
+ return
+}
+
+// -----
+
+func.func @test_dynamic_array_alloc_fails(%arg0: !fir.ref<!fir.array<?xf32>>) {
+ %0 = fir.convert %arg0 {test.ptr} : (!fir.ref<!fir.array<?xf32>>) -> !fir.llvm_ptr<!fir.array<?xf32>>
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Failed to generate alloc for operation: %{{.*}} = fir.convert %{{.*}} {test.ptr} : (!fir.ref<!fir.array<?xf32>>) -> !fir.llvm_ptr<!fir.array<?xf32>>
+ return
+}
+
+// -----
+
+func.func @test_unlimited_polymorphic_alloc_fails() {
+ %0 = fir.alloca !fir.class<none> {test.ptr}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Failed to generate alloc for operation: %{{.*}} = fir.alloca !fir.class<none> {test.ptr}
+ return
+}
+
+// -----
+
+func.func @test_dynamic_char_alloc_fails(%arg0: !fir.ref<!fir.char<1,?>>) {
+ %0 = fir.convert %arg0 {test.ptr} : (!fir.ref<!fir.char<1,?>>) -> !fir.llvm_ptr<!fir.char<1,?>>
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Failed to generate alloc for operation: %{{.*}} = fir.convert %{{.*}} {test.ptr} : (!fir.ref<!fir.char<1,?>>) -> !fir.llvm_ptr<!fir.char<1,?>>
+ return
+}
+
+// -----
+
+func.func @test_static_char_alloc() {
+ %0 = fir.alloca !fir.char<1,10> {test.ptr}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Successfully generated alloc for operation: %{{.*}} = fir.alloca !fir.char<1,10> {test.ptr}
+ // CHECK: Generated: %{{.*}} = fir.alloca !fir.char<1,10>
+ return
+}
diff --git a/flang/test/Fir/OpenACC/pointer-like-interface-copy.mlir b/flang/test/Fir/OpenACC/pointer-like-interface-copy.mlir
new file mode 100644
index 0000000..99fc012
--- /dev/null
+++ b/flang/test/Fir/OpenACC/pointer-like-interface-copy.mlir
@@ -0,0 +1,120 @@
+// RUN: fir-opt %s --split-input-file --pass-pipeline="builtin.module(func.func(test-acc-pointer-like-interface{test-mode=copy}))" 2>&1 | FileCheck %s
+
+// The tests here use a synthetic hlfir.declare in order to ensure that the hlfir dialect is
+// loaded. This is required because the pass used is part of OpenACC test passes outside of
+// flang, and the APIs being tested may generate hlfir operations even when none appear in the input.
+
+func.func @test_copy_scalar() {
+ %src = fir.alloca f32 {test.src_ptr}
+ %dest = fir.alloca f32 {test.dest_ptr}
+ %var = fir.alloca f32
+ %0:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Successfully generated copy from source: %{{.*}} = fir.alloca f32 {test.src_ptr} to destination: %{{.*}} = fir.alloca f32 {test.dest_ptr}
+ // CHECK: Generated: %{{.*}} = fir.load %{{.*}} : !fir.ref<f32>
+ // CHECK: Generated: fir.store %{{.*}} to %{{.*}} : !fir.ref<f32>
+ return
+}
+
+// -----
+
+func.func @test_copy_static_array() {
+ %src = fir.alloca !fir.array<10x20xf32> {test.src_ptr}
+ %dest = fir.alloca !fir.array<10x20xf32> {test.dest_ptr}
+ %var = fir.alloca f32
+ %0:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Successfully generated copy from source: %{{.*}} = fir.alloca !fir.array<10x20xf32> {test.src_ptr} to destination: %{{.*}} = fir.alloca !fir.array<10x20xf32> {test.dest_ptr}
+ // CHECK: Generated: hlfir.assign %{{.*}} to %{{.*}} : !fir.ref<!fir.array<10x20xf32>>, !fir.ref<!fir.array<10x20xf32>>
+ return
+}
+
+// -----
+
+func.func @test_copy_derived_type() {
+ %src = fir.alloca !fir.type<_QTt{i:i32}> {test.src_ptr}
+ %dest = fir.alloca !fir.type<_QTt{i:i32}> {test.dest_ptr}
+ %var = fir.alloca f32
+ %0:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Successfully generated copy from source: %{{.*}} = fir.alloca !fir.type<_QTt{i:i32}> {test.src_ptr} to destination: %{{.*}} = fir.alloca !fir.type<_QTt{i:i32}> {test.dest_ptr}
+ // CHECK: Generated: hlfir.assign %{{.*}} to %{{.*}} : !fir.ref<!fir.type<_QTt{i:i32}>>, !fir.ref<!fir.type<_QTt{i:i32}>>
+ return
+}
+
+// -----
+
+func.func @test_copy_heap_scalar() {
+ %src = fir.allocmem f32 {test.src_ptr}
+ %dest = fir.allocmem f32 {test.dest_ptr}
+ %var = fir.alloca f32
+ %0:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Successfully generated copy from source: %{{.*}} = fir.allocmem f32 {test.src_ptr} to destination: %{{.*}} = fir.allocmem f32 {test.dest_ptr}
+ // CHECK: Generated: %{{.*}} = fir.load %{{.*}} : !fir.heap<f32>
+ // CHECK: Generated: fir.store %{{.*}} to %{{.*}} : !fir.heap<f32>
+ return
+}
+
+// -----
+
+func.func @test_copy_static_char() {
+ %src = fir.alloca !fir.char<1,10> {test.src_ptr}
+ %dest = fir.alloca !fir.char<1,10> {test.dest_ptr}
+ %var = fir.alloca f32
+ %0:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Successfully generated copy from source: %{{.*}} = fir.alloca !fir.char<1,10> {test.src_ptr} to destination: %{{.*}} = fir.alloca !fir.char<1,10> {test.dest_ptr}
+ // CHECK: Generated: hlfir.assign %{{.*}} to %{{.*}} : !fir.ref<!fir.char<1,10>>, !fir.ref<!fir.char<1,10>>
+ return
+}
+
+// -----
+
+func.func @test_copy_mismatched_types_fails() {
+ %src = fir.alloca f32 {test.src_ptr}
+ %dest = fir.alloca f64 {test.dest_ptr}
+ %var = fir.alloca f32
+ %0:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Failed to generate copy from source: %{{.*}} = fir.alloca f32 {test.src_ptr} to destination: %{{.*}} = fir.alloca f64 {test.dest_ptr}
+ return
+}
+
+// -----
+
+func.func @test_copy_mismatched_shapes_fails() {
+ %src = fir.alloca !fir.array<10xf32> {test.src_ptr}
+ %dest = fir.alloca !fir.array<20xf32> {test.dest_ptr}
+ %var = fir.alloca f32
+ %0:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Failed to generate copy from source: %{{.*}} = fir.alloca !fir.array<10xf32> {test.src_ptr} to destination: %{{.*}} = fir.alloca !fir.array<20xf32> {test.dest_ptr}
+ return
+}
+
+// -----
+
+func.func @test_copy_dynamic_array_fails(%arg0: !fir.ref<!fir.array<?xf32>>, %arg1: !fir.ref<!fir.array<?xf32>>) {
+ %src = fir.convert %arg0 {test.src_ptr} : (!fir.ref<!fir.array<?xf32>>) -> !fir.llvm_ptr<!fir.array<?xf32>>
+ %dest = fir.convert %arg1 {test.dest_ptr} : (!fir.ref<!fir.array<?xf32>>) -> !fir.llvm_ptr<!fir.array<?xf32>>
+ %var = fir.alloca f32
+ %0:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Failed to generate copy from source: %{{.*}} = fir.convert %{{.*}} {test.src_ptr} : (!fir.ref<!fir.array<?xf32>>) -> !fir.llvm_ptr<!fir.array<?xf32>> to destination: %{{.*}} = fir.convert %{{.*}} {test.dest_ptr} : (!fir.ref<!fir.array<?xf32>>) -> !fir.llvm_ptr<!fir.array<?xf32>>
+ return
+}
+
+// -----
+
+func.func @test_copy_unlimited_polymorphic_fails() {
+ %src = fir.alloca !fir.class<none> {test.src_ptr}
+ %dest = fir.alloca !fir.class<none> {test.dest_ptr}
+ %var = fir.alloca f32
+ %0:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Failed to generate copy from source: %{{.*}} = fir.alloca !fir.class<none> {test.src_ptr} to destination: %{{.*}} = fir.alloca !fir.class<none> {test.dest_ptr}
+ return
+}
+
+// -----
+
+func.func @test_copy_dynamic_char_fails(%arg0: !fir.ref<!fir.char<1,?>>, %arg1: !fir.ref<!fir.char<1,?>>) {
+ %src = fir.convert %arg0 {test.src_ptr} : (!fir.ref<!fir.char<1,?>>) -> !fir.llvm_ptr<!fir.char<1,?>>
+ %dest = fir.convert %arg1 {test.dest_ptr} : (!fir.ref<!fir.char<1,?>>) -> !fir.llvm_ptr<!fir.char<1,?>>
+ %var = fir.alloca f32
+ %0:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Failed to generate copy from source: %{{.*}} = fir.convert %{{.*}} {test.src_ptr} : (!fir.ref<!fir.char<1,?>>) -> !fir.llvm_ptr<!fir.char<1,?>> to destination: %{{.*}} = fir.convert %{{.*}} {test.dest_ptr} : (!fir.ref<!fir.char<1,?>>) -> !fir.llvm_ptr<!fir.char<1,?>>
+ return
+}
diff --git a/flang/test/Fir/OpenACC/pointer-like-interface-free.mlir b/flang/test/Fir/OpenACC/pointer-like-interface-free.mlir
new file mode 100644
index 0000000..6334752
--- /dev/null
+++ b/flang/test/Fir/OpenACC/pointer-like-interface-free.mlir
@@ -0,0 +1,94 @@
+// RUN: fir-opt %s --split-input-file --pass-pipeline="builtin.module(func.func(test-acc-pointer-like-interface{test-mode=free}))" 2>&1 | FileCheck %s
+
+// The tests here use a synthetic hlfir.declare in order to ensure that the hlfir dialect is
+// loaded. This is required because the pass used is part of OpenACC test passes outside of
+// flang, and the APIs being tested may generate hlfir operations even when none appear in the input.
+
+func.func @test_ref_scalar_free() {
+ %0 = fir.alloca f32 {test.ptr}
+ %1:2 = hlfir.declare %0 {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Successfully generated free for operation: %{{.*}} = fir.alloca f32 {test.ptr}
+ // CHECK-NOT: Generated
+ return
+}
+
+// -----
+
+func.func @test_heap_scalar_free() {
+ %0 = fir.allocmem f32 {test.ptr}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Successfully generated free for operation: %{{.*}} = fir.allocmem f32 {test.ptr}
+ // CHECK: Generated: fir.freemem %{{.*}} : !fir.heap<f32>
+ return
+}
+
+// -----
+
+func.func @test_heap_array_free() {
+ %0 = fir.allocmem !fir.array<10x20xf32> {test.ptr}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Successfully generated free for operation: %{{.*}} = fir.allocmem !fir.array<10x20xf32> {test.ptr}
+ // CHECK: Generated: fir.freemem %{{.*}} : !fir.heap<!fir.array<10x20xf32>>
+ return
+}
+
+// -----
+
+func.func @test_convert_walking_free() {
+ %0 = fir.alloca f32
+ %1 = fir.convert %0 {test.ptr} : (!fir.ref<f32>) -> !fir.ptr<f32>
+ %2:2 = hlfir.declare %0 {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Successfully generated free for operation: %{{.*}} = fir.convert %{{.*}} {test.ptr} : (!fir.ref<f32>) -> !fir.ptr<f32>
+ // CHECK-NOT: Generated
+ return
+}
+
+// -----
+
+func.func @test_declare_walking_free() {
+ %0 = fir.alloca f32
+ %1 = fir.declare %0 {test.ptr, uniq_name = "x"} : (!fir.ref<f32>) -> !fir.ref<f32>
+ %2:2 = hlfir.declare %0 {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Successfully generated free for operation: %{{.*}} = fir.declare %{{.*}} {test.ptr, uniq_name = "x"} : (!fir.ref<f32>) -> !fir.ref<f32>
+ // CHECK-NOT: Generated
+ return
+}
+
+// -----
+
+func.func @test_hlfir_declare_walking_free() {
+ %0 = fir.alloca f32
+ %1:2 = hlfir.declare %0 {test.ptr, uniq_name = "x"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ %var = fir.alloca f32
+ %2:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Successfully generated free for operation
+ // CHECK-NOT: Generated
+ return
+}
+
+// -----
+
+func.func @test_heap_through_convert_free() {
+ %0 = fir.allocmem f32
+ %1 = fir.convert %0 {test.ptr} : (!fir.heap<f32>) -> !fir.llvm_ptr<f32>
+ %var = fir.alloca f32
+ %2:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Successfully generated free for operation: %{{.*}} = fir.convert %{{.*}} {test.ptr} : (!fir.heap<f32>) -> !fir.llvm_ptr<f32>
+ // CHECK: Generated: %{{.*}} = fir.convert %{{.*}} : (!fir.llvm_ptr<f32>) -> !fir.heap<f32>
+ // CHECK: Generated: fir.freemem %{{.*}} : !fir.heap<f32>
+ return
+}
+
+// -----
+
+func.func @test_heap_through_declare_free() {
+ %0 = fir.allocmem f32
+ %1 = fir.declare %0 {test.ptr, uniq_name = "x"} : (!fir.heap<f32>) -> !fir.heap<f32>
+ %var = fir.alloca f32
+ %2:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ // CHECK: Successfully generated free for operation: %{{.*}} = fir.declare %{{.*}} {test.ptr, uniq_name = "x"} : (!fir.heap<f32>) -> !fir.heap<f32>
+ // CHECK: Generated: fir.freemem %{{.*}} : !fir.heap<f32>
+ return
+}
diff --git a/flang/test/Fir/OpenACC/recipe-populate-firstprivate.mlir b/flang/test/Fir/OpenACC/recipe-populate-firstprivate.mlir
new file mode 100644
index 0000000..0c3f3fe
--- /dev/null
+++ b/flang/test/Fir/OpenACC/recipe-populate-firstprivate.mlir
@@ -0,0 +1,166 @@
+// RUN: fir-opt %s --split-input-file --pass-pipeline="builtin.module(test-acc-recipe-populate{recipe-type=firstprivate})" | FileCheck %s
+
+// The tests here use a synthetic hlfir.declare in order to ensure that the hlfir dialect is
+// loaded. This is required because the pass used is part of OpenACC test passes outside of
+// flang, and the APIs being tested may generate hlfir operations even when none appear in the input.
+
+// Test scalar type (f32)
+// CHECK: acc.firstprivate.recipe @firstprivate_scalar : !fir.ref<f32> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<f32>):
+// CHECK: %[[ALLOC:.*]] = fir.alloca f32
+// CHECK: %{{.*}}:2 = hlfir.declare %[[ALLOC]] {uniq_name = "scalar"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<f32>
+// CHECK: } copy {
+// CHECK: ^bb0(%[[SRC:.*]]: !fir.ref<f32>, %[[DST:.*]]: !fir.ref<f32>):
+// CHECK: %[[LOAD:.*]] = fir.load %[[SRC]] : !fir.ref<f32>
+// CHECK: fir.store %[[LOAD]] to %[[DST]] : !fir.ref<f32>
+// CHECK: acc.terminator
+// CHECK: }
+// CHECK-NOT: destroy
+
+func.func @test_scalar() {
+ %0 = fir.alloca f32 {test.var = "scalar"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test integer scalar
+// CHECK: acc.firstprivate.recipe @firstprivate_int : !fir.ref<i32> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<i32>):
+// CHECK: %[[ALLOC:.*]] = fir.alloca i32
+// CHECK: %{{.*}}:2 = hlfir.declare %[[ALLOC]] {uniq_name = "int"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<i32>
+// CHECK: } copy {
+// CHECK: ^bb0(%[[SRC:.*]]: !fir.ref<i32>, %[[DST:.*]]: !fir.ref<i32>):
+// CHECK: %[[LOAD:.*]] = fir.load %[[SRC]] : !fir.ref<i32>
+// CHECK: fir.store %[[LOAD]] to %[[DST]] : !fir.ref<i32>
+// CHECK: acc.terminator
+// CHECK: }
+// CHECK-NOT: destroy
+
+func.func @test_int() {
+ %0 = fir.alloca i32 {test.var = "int"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test logical type
+// CHECK: acc.firstprivate.recipe @firstprivate_logical : !fir.ref<!fir.logical<4>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.logical<4>>):
+// CHECK: %[[ALLOC:.*]] = fir.alloca !fir.logical<4>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[ALLOC]] {uniq_name = "logical"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<!fir.logical<4>>
+// CHECK: } copy {
+// CHECK: ^bb0(%[[SRC:.*]]: !fir.ref<!fir.logical<4>>, %[[DST:.*]]: !fir.ref<!fir.logical<4>>):
+// CHECK: %[[LOAD:.*]] = fir.load %[[SRC]] : !fir.ref<!fir.logical<4>>
+// CHECK: fir.store %[[LOAD]] to %[[DST]] : !fir.ref<!fir.logical<4>>
+// CHECK: acc.terminator
+// CHECK: }
+// CHECK-NOT: destroy
+
+func.func @test_logical() {
+ %0 = fir.alloca !fir.logical<4> {test.var = "logical"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test complex type
+// CHECK: acc.firstprivate.recipe @firstprivate_complex : !fir.ref<complex<f32>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<complex<f32>>):
+// CHECK: %[[ALLOC:.*]] = fir.alloca complex<f32>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[ALLOC]] {uniq_name = "complex"} : (!fir.ref<complex<f32>>) -> (!fir.ref<complex<f32>>, !fir.ref<complex<f32>>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<complex<f32>>
+// CHECK: } copy {
+// CHECK: ^bb0(%[[SRC:.*]]: !fir.ref<complex<f32>>, %[[DST:.*]]: !fir.ref<complex<f32>>):
+// CHECK: %[[LOAD:.*]] = fir.load %[[SRC]] : !fir.ref<complex<f32>>
+// CHECK: fir.store %[[LOAD]] to %[[DST]] : !fir.ref<complex<f32>>
+// CHECK: acc.terminator
+// CHECK: }
+// CHECK-NOT: destroy
+
+func.func @test_complex() {
+ %0 = fir.alloca complex<f32> {test.var = "complex"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test 1D static array
+// CHECK: acc.firstprivate.recipe @firstprivate_array_1d : !fir.ref<!fir.array<100xf32>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.array<100xf32>>):
+// CHECK: %[[C100:.*]] = arith.constant 100 : index
+// CHECK: %[[SHAPE:.*]] = fir.shape %[[C100]] : (index) -> !fir.shape<1>
+// CHECK: %[[ALLOC:.*]] = fir.alloca !fir.array<100xf32>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[ALLOC]](%[[SHAPE]]) {uniq_name = "array_1d"} : (!fir.ref<!fir.array<100xf32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100xf32>>, !fir.ref<!fir.array<100xf32>>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<!fir.array<100xf32>>
+// CHECK: } copy {
+// CHECK: ^bb0(%[[SRC:.*]]: !fir.ref<!fir.array<100xf32>>, %[[DST:.*]]: !fir.ref<!fir.array<100xf32>>):
+// CHECK: hlfir.assign %[[SRC]] to %[[DST]] : !fir.ref<!fir.array<100xf32>>, !fir.ref<!fir.array<100xf32>>
+// CHECK: acc.terminator
+// CHECK: }
+// CHECK-NOT: destroy
+
+func.func @test_array_1d() {
+ %0 = fir.alloca !fir.array<100xf32> {test.var = "array_1d"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test 2D static array
+// CHECK: acc.firstprivate.recipe @firstprivate_array_2d : !fir.ref<!fir.array<10x20xi32>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.array<10x20xi32>>):
+// CHECK: %[[C10:.*]] = arith.constant 10 : index
+// CHECK: %[[C20:.*]] = arith.constant 20 : index
+// CHECK: %[[SHAPE:.*]] = fir.shape %[[C10]], %[[C20]] : (index, index) -> !fir.shape<2>
+// CHECK: %[[ALLOC:.*]] = fir.alloca !fir.array<10x20xi32>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[ALLOC]](%[[SHAPE]]) {uniq_name = "array_2d"} : (!fir.ref<!fir.array<10x20xi32>>, !fir.shape<2>) -> (!fir.ref<!fir.array<10x20xi32>>, !fir.ref<!fir.array<10x20xi32>>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<!fir.array<10x20xi32>>
+// CHECK: } copy {
+// CHECK: ^bb0(%[[SRC:.*]]: !fir.ref<!fir.array<10x20xi32>>, %[[DST:.*]]: !fir.ref<!fir.array<10x20xi32>>):
+// CHECK: hlfir.assign %[[SRC]] to %[[DST]] : !fir.ref<!fir.array<10x20xi32>>, !fir.ref<!fir.array<10x20xi32>>
+// CHECK: acc.terminator
+// CHECK: }
+// CHECK-NOT: destroy
+
+func.func @test_array_2d() {
+ %0 = fir.alloca !fir.array<10x20xi32> {test.var = "array_2d"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test derived type with multiple fields
+// CHECK: acc.firstprivate.recipe @firstprivate_derived : !fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>):
+// CHECK: %[[ALLOC:.*]] = fir.alloca !fir.type<_QTpoint{x:f32,y:f32,z:f32}>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[ALLOC]] {uniq_name = "derived"} : (!fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>) -> (!fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>, !fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>
+// CHECK: } copy {
+// CHECK: ^bb0(%[[SRC:.*]]: !fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>, %[[DST:.*]]: !fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>):
+// CHECK: hlfir.assign %[[SRC]] to %[[DST]] : !fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>, !fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>
+// CHECK: acc.terminator
+// CHECK: }
+// CHECK-NOT: destroy
+
+func.func @test_derived() {
+ %0 = fir.alloca !fir.type<_QTpoint{x:f32,y:f32,z:f32}> {test.var = "derived"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
diff --git a/flang/test/Fir/OpenACC/recipe-populate-private.mlir b/flang/test/Fir/OpenACC/recipe-populate-private.mlir
new file mode 100644
index 0000000..aeb60d6
--- /dev/null
+++ b/flang/test/Fir/OpenACC/recipe-populate-private.mlir
@@ -0,0 +1,223 @@
+// RUN: fir-opt %s --split-input-file --pass-pipeline="builtin.module(test-acc-recipe-populate{recipe-type=private})" | FileCheck %s
+
+// The tests here use a synthetic hlfir.declare in order to ensure that the hlfir dialect is
+// loaded. This is required because the pass used is part of OpenACC test passes outside of
+// flang, and the APIs being tested may generate hlfir operations even when none appear in the input.
+
+// Test scalar type (f32)
+// CHECK: acc.private.recipe @private_scalar : !fir.ref<f32> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<f32>):
+// CHECK: %[[ALLOC:.*]] = fir.alloca f32
+// CHECK: %{{.*}}:2 = hlfir.declare %[[ALLOC]] {uniq_name = "scalar"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<f32>
+// CHECK: }
+// CHECK-NOT: destroy
+
+func.func @test_scalar() {
+ %0 = fir.alloca f32 {test.var = "scalar"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test logical type
+// CHECK: acc.private.recipe @private_logical : !fir.ref<!fir.logical<4>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.logical<4>>):
+// CHECK: %[[ALLOC:.*]] = fir.alloca !fir.logical<4>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[ALLOC]] {uniq_name = "logical"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<!fir.logical<4>>
+// CHECK: }
+// CHECK-NOT: destroy
+
+func.func @test_logical() {
+ %0 = fir.alloca !fir.logical<4> {test.var = "logical"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test complex type
+// CHECK: acc.private.recipe @private_complex : !fir.ref<complex<f32>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<complex<f32>>):
+// CHECK: %[[ALLOC:.*]] = fir.alloca complex<f32>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[ALLOC]] {uniq_name = "complex"} : (!fir.ref<complex<f32>>) -> (!fir.ref<complex<f32>>, !fir.ref<complex<f32>>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<complex<f32>>
+// CHECK: }
+// CHECK-NOT: destroy
+
+func.func @test_complex() {
+ %0 = fir.alloca complex<f32> {test.var = "complex"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test 1D static array
+// CHECK: acc.private.recipe @private_array_1d : !fir.ref<!fir.array<100xf32>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.array<100xf32>>):
+// CHECK: %[[C100:.*]] = arith.constant 100 : index
+// CHECK: %[[SHAPE:.*]] = fir.shape %[[C100]] : (index) -> !fir.shape<1>
+// CHECK: %[[ALLOC:.*]] = fir.alloca !fir.array<100xf32>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[ALLOC]](%[[SHAPE]]) {uniq_name = "array_1d"} : (!fir.ref<!fir.array<100xf32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100xf32>>, !fir.ref<!fir.array<100xf32>>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<!fir.array<100xf32>>
+// CHECK: }
+// CHECK-NOT: destroy
+
+func.func @test_array_1d() {
+ %0 = fir.alloca !fir.array<100xf32> {test.var = "array_1d"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test 3D static array
+// CHECK: acc.private.recipe @private_array_3d : !fir.ref<!fir.array<5x10x15xi32>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.array<5x10x15xi32>>):
+// CHECK: %[[C5:.*]] = arith.constant 5 : index
+// CHECK: %[[C10:.*]] = arith.constant 10 : index
+// CHECK: %[[C15:.*]] = arith.constant 15 : index
+// CHECK: %[[SHAPE:.*]] = fir.shape %[[C5]], %[[C10]], %[[C15]] : (index, index, index) -> !fir.shape<3>
+// CHECK: %[[ALLOC:.*]] = fir.alloca !fir.array<5x10x15xi32>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[ALLOC]](%[[SHAPE]]) {uniq_name = "array_3d"} : (!fir.ref<!fir.array<5x10x15xi32>>, !fir.shape<3>) -> (!fir.ref<!fir.array<5x10x15xi32>>, !fir.ref<!fir.array<5x10x15xi32>>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<!fir.array<5x10x15xi32>>
+// CHECK: }
+// CHECK-NOT: destroy
+
+func.func @test_array_3d() {
+ %0 = fir.alloca !fir.array<5x10x15xi32> {test.var = "array_3d"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test derived type with multiple fields
+// CHECK: acc.private.recipe @private_derived : !fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>):
+// CHECK: %[[ALLOC:.*]] = fir.alloca !fir.type<_QTpoint{x:f32,y:f32,z:f32}>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[ALLOC]] {uniq_name = "derived"} : (!fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>) -> (!fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>, !fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>
+// CHECK: }
+// CHECK-NOT: destroy
+
+func.func @test_derived() {
+ %0 = fir.alloca !fir.type<_QTpoint{x:f32,y:f32,z:f32}> {test.var = "derived"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test box type with heap scalar (needs destroy)
+// CHECK: acc.private.recipe @private_box_heap_scalar : !fir.ref<!fir.box<!fir.heap<f64>>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.box<!fir.heap<f64>>>):
+// CHECK: %[[BOXALLOC:.*]] = fir.alloca !fir.box<!fir.heap<f64>>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[BOXALLOC]] {uniq_name = "box_heap_scalar"} : (!fir.ref<!fir.box<!fir.heap<f64>>>) -> (!fir.ref<!fir.box<!fir.heap<f64>>>, !fir.ref<!fir.box<!fir.heap<f64>>>)
+// CHECK: %[[SCALAR:.*]] = fir.allocmem f64
+// CHECK: %[[EMBOX:.*]] = fir.embox %[[SCALAR]] : (!fir.heap<f64>) -> !fir.box<!fir.heap<f64>>
+// CHECK: fir.store %[[EMBOX]] to %{{.*}}#0 : !fir.ref<!fir.box<!fir.heap<f64>>>
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<!fir.box<!fir.heap<f64>>>
+// CHECK: } destroy {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.box<!fir.heap<f64>>>, %{{.*}}: !fir.ref<!fir.box<!fir.heap<f64>>>):
+// CHECK: acc.terminator
+// CHECK: }
+
+func.func @test_box_heap_scalar() {
+ %0 = fir.alloca !fir.box<!fir.heap<f64>> {test.var = "box_heap_scalar"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test box type with pointer scalar (needs destroy)
+// CHECK: acc.private.recipe @private_box_ptr_scalar : !fir.ref<!fir.box<!fir.ptr<i32>>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.box<!fir.ptr<i32>>>):
+// CHECK: %[[BOXALLOC:.*]] = fir.alloca !fir.box<!fir.ptr<i32>>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[BOXALLOC]] {uniq_name = "box_ptr_scalar"} : (!fir.ref<!fir.box<!fir.ptr<i32>>>) -> (!fir.ref<!fir.box<!fir.ptr<i32>>>, !fir.ref<!fir.box<!fir.ptr<i32>>>)
+// CHECK: %[[SCALAR:.*]] = fir.allocmem i32
+// CHECK: %[[EMBOX:.*]] = fir.embox %[[SCALAR]] : (!fir.heap<i32>) -> !fir.box<!fir.ptr<i32>>
+// CHECK: fir.store %[[EMBOX]] to %{{.*}}#0 : !fir.ref<!fir.box<!fir.ptr<i32>>>
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<!fir.box<!fir.ptr<i32>>>
+// CHECK: } destroy {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.box<!fir.ptr<i32>>>, %{{.*}}: !fir.ref<!fir.box<!fir.ptr<i32>>>):
+// CHECK: acc.terminator
+// CHECK: }
+
+func.func @test_box_ptr_scalar() {
+ %0 = fir.alloca !fir.box<!fir.ptr<i32>> {test.var = "box_ptr_scalar"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test box type with 1D heap array (needs destroy)
+// CHECK: acc.private.recipe @private_box_heap_array_1d : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>):
+// CHECK: %[[BOXALLOC:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xf32>>>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[BOXALLOC]] {uniq_name = "box_heap_array_1d"} : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>) -> (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
+// CHECK: } destroy {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>, %{{.*}}: !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>):
+// CHECK: acc.terminator
+// CHECK: }
+
+func.func @test_box_heap_array_1d() {
+ %0 = fir.alloca !fir.box<!fir.heap<!fir.array<?xf32>>> {test.var = "box_heap_array_1d"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test box type with 2D heap array (needs destroy)
+// CHECK: acc.private.recipe @private_box_heap_array_2d : !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xi64>>>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xi64>>>>):
+// CHECK: %[[BOXALLOC:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?x?xi64>>>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[BOXALLOC]] {uniq_name = "box_heap_array_2d"} : (!fir.ref<!fir.box<!fir.heap<!fir.array<?x?xi64>>>>) -> (!fir.ref<!fir.box<!fir.heap<!fir.array<?x?xi64>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xi64>>>>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xi64>>>>
+// CHECK: } destroy {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xi64>>>>, %{{.*}}: !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xi64>>>>):
+// CHECK: acc.terminator
+// CHECK: }
+
+func.func @test_box_heap_array_2d() {
+ %0 = fir.alloca !fir.box<!fir.heap<!fir.array<?x?xi64>>> {test.var = "box_heap_array_2d"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test box type with pointer array (needs destroy)
+// CHECK: acc.private.recipe @private_box_ptr_array : !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>):
+// CHECK: %[[BOXALLOC:.*]] = fir.alloca !fir.box<!fir.ptr<!fir.array<?xf32>>>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[BOXALLOC]] {uniq_name = "box_ptr_array"} : (!fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>) -> (!fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>, !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>
+// CHECK: } destroy {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>, %{{.*}}: !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>):
+// CHECK: acc.terminator
+// CHECK: }
+
+func.func @test_box_ptr_array() {
+ %0 = fir.alloca !fir.box<!fir.ptr<!fir.array<?xf32>>> {test.var = "box_ptr_array"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
diff --git a/flang/test/Fir/basic-program.fir b/flang/test/Fir/basic-program.fir
index 195e5ad..59f6c73 100644
--- a/flang/test/Fir/basic-program.fir
+++ b/flang/test/Fir/basic-program.fir
@@ -69,6 +69,7 @@ func.func @_QQmain() {
// PASSES-NEXT: InlineHLFIRAssign
// PASSES-NEXT: ConvertHLFIRtoFIR
// PASSES-NEXT: LowerWorkshare
+// PASSES-NEXT: LowerWorkdistribute
// PASSES-NEXT: CSE
// PASSES-NEXT: (S) 0 num-cse'd - Number of operations CSE'd
// PASSES-NEXT: (S) 0 num-dce'd - Number of operations DCE'd
diff --git a/flang/test/Lower/OpenMP/workdistribute-multiple.f90 b/flang/test/Lower/OpenMP/workdistribute-multiple.f90
new file mode 100644
index 0000000..cf1d9dd
--- /dev/null
+++ b/flang/test/Lower/OpenMP/workdistribute-multiple.f90
@@ -0,0 +1,20 @@
+! RUN: not %flang_fc1 -emit-fir -fopenmp -fopenmp-version=60 %s -o - 2>&1 | FileCheck %s
+
+! CHECK: error: teams has multiple workdistribute ops.
+! CHECK-LABEL: func @_QPteams_workdistribute_1
+subroutine teams_workdistribute_1()
+ use iso_fortran_env
+ real(kind=real32) :: a
+ real(kind=real32), dimension(10) :: x
+ real(kind=real32), dimension(10) :: y
+ !$omp teams
+
+ !$omp workdistribute
+ y = a * x + y
+ !$omp end workdistribute
+
+ !$omp workdistribute
+ y = a * y + x
+ !$omp end workdistribute
+ !$omp end teams
+end subroutine teams_workdistribute_1
diff --git a/flang/test/Lower/OpenMP/workdistribute-saxpy-1d.f90 b/flang/test/Lower/OpenMP/workdistribute-saxpy-1d.f90
new file mode 100644
index 0000000..b2dbc0f
--- /dev/null
+++ b/flang/test/Lower/OpenMP/workdistribute-saxpy-1d.f90
@@ -0,0 +1,39 @@
+! RUN: %flang_fc1 -emit-fir -fopenmp -fopenmp-version=60 %s -o - | FileCheck %s
+
+! CHECK-LABEL: func @_QPtarget_teams_workdistribute
+subroutine target_teams_workdistribute()
+ use iso_fortran_env
+ real(kind=real32) :: a
+ real(kind=real32), dimension(10) :: x
+ real(kind=real32), dimension(10) :: y
+
+ ! CHECK: omp.target_data
+ ! CHECK: omp.target
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+
+ !$omp target teams workdistribute
+ y = a * x + y
+ !$omp end target teams workdistribute
+end subroutine target_teams_workdistribute
+
+! CHECK-LABEL: func @_QPteams_workdistribute
+subroutine teams_workdistribute()
+ use iso_fortran_env
+ real(kind=real32) :: a
+ real(kind=real32), dimension(10) :: x
+ real(kind=real32), dimension(10) :: y
+
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+
+ !$omp teams workdistribute
+ y = a * x + y
+ !$omp end teams workdistribute
+end subroutine teams_workdistribute
diff --git a/flang/test/Lower/OpenMP/workdistribute-saxpy-2d.f90 b/flang/test/Lower/OpenMP/workdistribute-saxpy-2d.f90
new file mode 100644
index 0000000..09e1211
--- /dev/null
+++ b/flang/test/Lower/OpenMP/workdistribute-saxpy-2d.f90
@@ -0,0 +1,45 @@
+! RUN: %flang_fc1 -emit-fir -fopenmp -fopenmp-version=60 %s -o - | FileCheck %s
+
+! CHECK-LABEL: func @_QPtarget_teams_workdistribute
+subroutine target_teams_workdistribute(a, x, y, rows, cols)
+ use iso_fortran_env
+ implicit none
+
+ integer, intent(in) :: rows, cols
+ real(kind=real32) :: a
+ real(kind=real32), dimension(rows, cols) :: x, y
+
+ ! CHECK: omp.target_data
+ ! CHECK: omp.target
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+ ! CHECK: fir.do_loop
+
+ !$omp target teams workdistribute
+ y = a * x + y
+ !$omp end target teams workdistribute
+end subroutine target_teams_workdistribute
+
+! CHECK-LABEL: func @_QPteams_workdistribute
+subroutine teams_workdistribute(a, x, y, rows, cols)
+ use iso_fortran_env
+ implicit none
+
+ integer, intent(in) :: rows, cols
+ real(kind=real32) :: a
+ real(kind=real32), dimension(rows, cols) :: x, y
+
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+ ! CHECK: fir.do_loop
+
+ !$omp teams workdistribute
+ y = a * x + y
+ !$omp end teams workdistribute
+end subroutine teams_workdistribute
diff --git a/flang/test/Lower/OpenMP/workdistribute-saxpy-3d.f90 b/flang/test/Lower/OpenMP/workdistribute-saxpy-3d.f90
new file mode 100644
index 0000000..cf5d023
--- /dev/null
+++ b/flang/test/Lower/OpenMP/workdistribute-saxpy-3d.f90
@@ -0,0 +1,47 @@
+! RUN: %flang_fc1 -emit-fir -fopenmp -fopenmp-version=60 %s -o - | FileCheck %s
+
+! CHECK-LABEL: func @_QPtarget_teams_workdistribute
+subroutine target_teams_workdistribute(a, x, y, rows, cols, depth)
+ use iso_fortran_env
+ implicit none
+
+ integer, intent(in) :: rows, cols, depth
+ real(kind=real32) :: a
+ real(kind=real32), dimension(rows, cols, depth) :: x, y
+
+ ! CHECK: omp.target_data
+ ! CHECK: omp.target
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+ ! CHECK: fir.do_loop
+ ! CHECK: fir.do_loop
+
+ !$omp target teams workdistribute
+ y = a * x + y
+ !$omp end target teams workdistribute
+end subroutine target_teams_workdistribute
+
+! CHECK-LABEL: func @_QPteams_workdistribute
+subroutine teams_workdistribute(a, x, y, rows, cols, depth)
+ use iso_fortran_env
+ implicit none
+
+ integer, intent(in) :: rows, cols, depth
+ real(kind=real32) :: a
+ real(kind=real32), dimension(rows, cols, depth) :: x, y
+
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+ ! CHECK: fir.do_loop
+ ! CHECK: fir.do_loop
+
+ !$omp teams workdistribute
+ y = a * x + y
+ !$omp end teams workdistribute
+end subroutine teams_workdistribute
diff --git a/flang/test/Lower/OpenMP/workdistribute-saxpy-and-scalar-assign.f90 b/flang/test/Lower/OpenMP/workdistribute-saxpy-and-scalar-assign.f90
new file mode 100644
index 0000000..516c460
--- /dev/null
+++ b/flang/test/Lower/OpenMP/workdistribute-saxpy-and-scalar-assign.f90
@@ -0,0 +1,53 @@
+! RUN: %flang_fc1 -emit-fir -fopenmp -fopenmp-version=60 %s -o - | FileCheck %s
+
+! CHECK-LABEL: func @_QPtarget_teams_workdistribute
+subroutine target_teams_workdistribute()
+ use iso_fortran_env
+ real(kind=real32) :: a
+ real(kind=real32), dimension(10) :: x
+ real(kind=real32), dimension(10) :: y
+ !$omp target teams workdistribute
+
+ ! CHECK: omp.target_data
+ ! CHECK: omp.target
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+
+ y = a * x + y
+
+ ! CHECK: omp.target
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+
+ y = 2.0_real32
+
+ !$omp end target teams workdistribute
+end subroutine target_teams_workdistribute
+
+! CHECK-LABEL: func @_QPteams_workdistribute
+subroutine teams_workdistribute()
+ use iso_fortran_env
+ real(kind=real32) :: a
+ real(kind=real32), dimension(10) :: x
+ real(kind=real32), dimension(10) :: y
+ !$omp teams workdistribute
+
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+
+ y = a * x + y
+
+ ! CHECK: fir.call @_FortranAAssign
+ y = 2.0_real32
+
+ !$omp end teams workdistribute
+end subroutine teams_workdistribute
diff --git a/flang/test/Lower/OpenMP/workdistribute-saxpy-two-2d.f90 b/flang/test/Lower/OpenMP/workdistribute-saxpy-two-2d.f90
new file mode 100644
index 0000000..4aeb2e8
--- /dev/null
+++ b/flang/test/Lower/OpenMP/workdistribute-saxpy-two-2d.f90
@@ -0,0 +1,68 @@
+! RUN: %flang_fc1 -emit-fir -fopenmp -fopenmp-version=60 %s -o - | FileCheck %s
+
+! CHECK-LABEL: func @_QPtarget_teams_workdistribute
+subroutine target_teams_workdistribute(a, x, y, rows, cols)
+ use iso_fortran_env
+ implicit none
+
+ integer, intent(in) :: rows, cols
+ real(kind=real32) :: a
+ real(kind=real32), dimension(rows, cols) :: x, y
+
+ !$omp target teams workdistribute
+
+ ! CHECK: omp.target_data
+ ! CHECK: omp.target
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+ ! CHECK: fir.do_loop
+
+ y = a * x + y
+
+ ! CHECK: omp.target
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+ ! CHECK: fir.do_loop
+
+ y = a * y + x
+
+ !$omp end target teams workdistribute
+end subroutine target_teams_workdistribute
+
+! CHECK-LABEL: func @_QPteams_workdistribute
+subroutine teams_workdistribute(a, x, y, rows, cols)
+ use iso_fortran_env
+ implicit none
+
+ integer, intent(in) :: rows, cols
+ real(kind=real32) :: a
+ real(kind=real32), dimension(rows, cols) :: x, y
+
+ !$omp teams workdistribute
+
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+ ! CHECK: fir.do_loop
+
+ y = a * x + y
+
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+ ! CHECK: fir.do_loop
+
+ y = a * y + x
+
+ !$omp end teams workdistribute
+end subroutine teams_workdistribute
diff --git a/flang/test/Lower/OpenMP/workdistribute-scalar-assign.f90 b/flang/test/Lower/OpenMP/workdistribute-scalar-assign.f90
new file mode 100644
index 0000000..3062b35
--- /dev/null
+++ b/flang/test/Lower/OpenMP/workdistribute-scalar-assign.f90
@@ -0,0 +1,29 @@
+! RUN: %flang_fc1 -emit-fir -fopenmp -fopenmp-version=60 %s -o - | FileCheck %s
+
+! CHECK-LABEL: func @_QPtarget_teams_workdistribute_scalar_assign
+subroutine target_teams_workdistribute_scalar_assign()
+ integer :: aa(10)
+
+ ! CHECK: omp.target_data
+ ! CHECK: omp.target
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+
+ !$omp target teams workdistribute
+ aa = 20
+ !$omp end target teams workdistribute
+
+end subroutine target_teams_workdistribute_scalar_assign
+
+! CHECK-LABEL: func @_QPteams_workdistribute_scalar_assign
+subroutine teams_workdistribute_scalar_assign()
+ integer :: aa(10)
+ ! CHECK: fir.call @_FortranAAssign
+ !$omp teams workdistribute
+ aa = 20
+ !$omp end teams workdistribute
+
+end subroutine teams_workdistribute_scalar_assign
diff --git a/flang/test/Lower/OpenMP/workdistribute-target-teams-clauses.f90 b/flang/test/Lower/OpenMP/workdistribute-target-teams-clauses.f90
new file mode 100644
index 0000000..4a08e53
--- /dev/null
+++ b/flang/test/Lower/OpenMP/workdistribute-target-teams-clauses.f90
@@ -0,0 +1,32 @@
+! RUN: %flang_fc1 -emit-fir -fopenmp -fopenmp-version=60 %s -o - | FileCheck %s
+
+! CHECK-LABEL: func @_QPtarget_teams_workdistribute
+! CHECK: omp.target_data map_entries({{.*}})
+! CHECK: omp.target thread_limit({{.*}}) host_eval({{.*}}) map_entries({{.*}})
+! CHECK: omp.teams num_teams({{.*}})
+! CHECK: omp.parallel
+! CHECK: omp.distribute
+! CHECK: omp.wsloop
+! CHECK: omp.loop_nest
+
+subroutine target_teams_workdistribute()
+ use iso_fortran_env
+ real(kind=real32) :: a
+ real(kind=real32), dimension(10) :: x
+ real(kind=real32), dimension(10) :: y
+ integer :: i
+
+ a = 2.0_real32
+ x = [(real(i, real32), i = 1, 10)]
+ y = [(real(i * 0.5, real32), i = 1, 10)]
+
+ !$omp target teams workdistribute &
+ !$omp& num_teams(4) &
+ !$omp& thread_limit(8) &
+ !$omp& default(shared) &
+ !$omp& private(i) &
+ !$omp& map(to: x) &
+ !$omp& map(tofrom: y)
+ y = a * x + y
+ !$omp end target teams workdistribute
+end subroutine target_teams_workdistribute
diff --git a/flang/test/Lower/OpenMP/workdistribute-teams-unsupported-after.f90 b/flang/test/Lower/OpenMP/workdistribute-teams-unsupported-after.f90
new file mode 100644
index 0000000..f9c5a77
--- /dev/null
+++ b/flang/test/Lower/OpenMP/workdistribute-teams-unsupported-after.f90
@@ -0,0 +1,22 @@
+! RUN: not %flang_fc1 -emit-fir -fopenmp -fopenmp-version=60 %s -o - 2>&1 | FileCheck %s
+
+! CHECK: error: teams has omp ops other than workdistribute. Lowering not implemented yet.
+! CHECK-LABEL: func @_QPteams_workdistribute_1
+subroutine teams_workdistribute_1()
+ use iso_fortran_env
+ real(kind=real32) :: a
+ real(kind=real32), dimension(10) :: x
+ real(kind=real32), dimension(10) :: y
+ !$omp teams
+
+ !$omp workdistribute
+ y = a * x + y
+ !$omp end workdistribute
+
+ !$omp distribute
+ do i = 1, 10
+ x(i) = real(i, kind=real32)
+ end do
+ !$omp end distribute
+ !$omp end teams
+end subroutine teams_workdistribute_1
diff --git a/flang/test/Lower/OpenMP/workdistribute-teams-unsupported-before.f90 b/flang/test/Lower/OpenMP/workdistribute-teams-unsupported-before.f90
new file mode 100644
index 0000000..3ef7f90
--- /dev/null
+++ b/flang/test/Lower/OpenMP/workdistribute-teams-unsupported-before.f90
@@ -0,0 +1,22 @@
+! RUN: not %flang_fc1 -emit-fir -fopenmp -fopenmp-version=60 %s -o - 2>&1 | FileCheck %s
+
+! CHECK: error: teams has omp ops other than workdistribute. Lowering not implemented yet.
+! CHECK-LABEL: func @_QPteams_workdistribute_1
+subroutine teams_workdistribute_1()
+ use iso_fortran_env
+ real(kind=real32) :: a
+ real(kind=real32), dimension(10) :: x
+ real(kind=real32), dimension(10) :: y
+ !$omp teams
+
+ !$omp distribute
+ do i = 1, 10
+ x(i) = real(i, kind=real32)
+ end do
+ !$omp end distribute
+
+ !$omp workdistribute
+ y = a * x + y
+ !$omp end workdistribute
+ !$omp end teams
+end subroutine teams_workdistribute_1
diff --git a/flang/test/Lower/polymorphic-temp.f90 b/flang/test/Lower/polymorphic-temp.f90
index a9db9ba..ac3cbdb 100644
--- a/flang/test/Lower/polymorphic-temp.f90
+++ b/flang/test/Lower/polymorphic-temp.f90
@@ -223,4 +223,75 @@ contains
! CHECK: %[[A_REBOX:.*]] = fir.rebox %[[LOAD_A]] : (!fir.class<!fir.heap<!fir.type<_QMpoly_tmpTp1{a:i32}>>>) -> !fir.box<!fir.heap<!fir.type<_QMpoly_tmpTp1{a:i32}>>>
! CHECK: %{{.*}} = arith.select %[[CMPI]], %[[A_REBOX]], %[[LOAD_B]] : !fir.box<!fir.heap<!fir.type<_QMpoly_tmpTp1{a:i32}>>>
+ subroutine check_unlimited_poly(a)
+ class(*), intent(in) :: a
+ end subroutine
+
+ subroutine test_merge_intrinsic3(a, b, i)
+ class(*), intent(in) :: a, b
+ integer, intent(in) :: i
+
+ call check_unlimited_poly(merge(a, b, i==1))
+ end subroutine
+
+! CHECK-LABEL: func.func @_QMpoly_tmpPtest_merge_intrinsic3(
+! CHECK-SAME: %[[A:.*]]: !fir.class<none> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.class<none> {fir.bindc_name = "b"}, %[[I:.*]]: !fir.ref<i32> {fir.bindc_name = "i"}) {
+! CHECK: %[[V_0:[0-9]+]] = fir.load %[[I]] : !fir.ref<i32>
+! CHECK: %[[C1:.*]] = arith.constant 1 : i32
+! CHECK: %[[V_1:[0-9]+]] = arith.cmpi eq, %[[V_0]], %[[C1]] : i32
+! CHECK: %[[V_2:[0-9]+]] = arith.select %[[V_1]], %[[A]], %[[B]] : !fir.class<none>
+! CHECK: fir.call @_QMpoly_tmpPcheck_unlimited_poly(%[[V_2]]) fastmath<contract> : (!fir.class<none>) -> ()
+
+ subroutine test_merge_intrinsic4(i)
+ integer, intent(in) :: i
+ class(*), allocatable :: a, b
+
+ call check_unlimited_poly(merge(a, b, i==1))
+ end subroutine
+
+! CHECK-LABEL: func.func @_QMpoly_tmpPtest_merge_intrinsic4(
+! CHECK-SAME: %[[I:.*]]: !fir.ref<i32> {fir.bindc_name = "i"}) {
+! CHECK: %[[V_0:[0-9]+]] = fir.alloca !fir.class<!fir.heap<none>> {bindc_name = "a", uniq_name = "_QMpoly_tmpFtest_merge_intrinsic4Ea"}
+! CHECK: %[[V_1:[0-9]+]] = fir.zero_bits !fir.heap<none>
+! CHECK: %[[V_2:[0-9]+]] = fir.embox %[[V_1]] : (!fir.heap<none>) -> !fir.class<!fir.heap<none>>
+! CHECK: fir.store %[[V_2]] to %[[V_0]] : !fir.ref<!fir.class<!fir.heap<none>>>
+! CHECK: %[[V_3:[0-9]+]] = fir.alloca !fir.class<!fir.heap<none>> {bindc_name = "b", uniq_name = "_QMpoly_tmpFtest_merge_intrinsic4Eb"}
+! CHECK: %[[V_4:[0-9]+]] = fir.zero_bits !fir.heap<none>
+! CHECK: %[[V_5:[0-9]+]] = fir.embox %[[V_4]] : (!fir.heap<none>) -> !fir.class<!fir.heap<none>>
+! CHECK: fir.store %[[V_5]] to %[[V_3]] : !fir.ref<!fir.class<!fir.heap<none>>>
+! CHECK: %[[V_6:[0-9]+]] = fir.load %[[V_0]] : !fir.ref<!fir.class<!fir.heap<none>>>
+! CHECK: %[[V_7:[0-9]+]] = fir.load %[[V_3]] : !fir.ref<!fir.class<!fir.heap<none>>>
+! CHECK: %[[V_8:[0-9]+]] = fir.load %[[I]] : !fir.ref<i32>
+! CHECK: %[[C1:.*]] = arith.constant 1 : i32
+! CHECK: %[[V_9:[0-9]+]] = arith.cmpi eq, %[[V_8]], %[[C1]] : i32
+! CHECK: %[[V_10:[0-9]+]] = arith.select %[[V_9]], %[[V_6]], %[[V_7]] : !fir.class<!fir.heap<none>>
+! CHECK: %[[V_11:[0-9]+]] = fir.rebox %[[V_10]] : (!fir.class<!fir.heap<none>>) -> !fir.class<none>
+! CHECK: fir.call @_QMpoly_tmpPcheck_unlimited_poly(%[[V_11]]) fastmath<contract> : (!fir.class<none>) -> ()
+
+ subroutine test_merge_intrinsic5(i)
+ integer, intent(in) :: i
+ class(*), pointer :: a, b
+
+ call check_unlimited_poly(merge(a, b, i==1))
+ end subroutine
+
+! CHECK-LABEL: func.func @_QMpoly_tmpPtest_merge_intrinsic5(
+! CHECK-SAME: %[[I:.*]]: !fir.ref<i32> {fir.bindc_name = "i"}) {
+! CHECK: %[[V_0:[0-9]+]] = fir.alloca !fir.class<!fir.ptr<none>> {bindc_name = "a", uniq_name = "_QMpoly_tmpFtest_merge_intrinsic5Ea"}
+! CHECK: %[[V_1:[0-9]+]] = fir.zero_bits !fir.ptr<none>
+! CHECK: %[[V_2:[0-9]+]] = fir.embox %[[V_1]] : (!fir.ptr<none>) -> !fir.class<!fir.ptr<none>>
+! CHECK: fir.store %[[V_2]] to %[[V_0]] : !fir.ref<!fir.class<!fir.ptr<none>>>
+! CHECK: %[[V_3:[0-9]+]] = fir.alloca !fir.class<!fir.ptr<none>> {bindc_name = "b", uniq_name = "_QMpoly_tmpFtest_merge_intrinsic5Eb"}
+! CHECK: %[[V_4:[0-9]+]] = fir.zero_bits !fir.ptr<none>
+! CHECK: %[[V_5:[0-9]+]] = fir.embox %[[V_4]] : (!fir.ptr<none>) -> !fir.class<!fir.ptr<none>>
+! CHECK: fir.store %[[V_5]] to %[[V_3]] : !fir.ref<!fir.class<!fir.ptr<none>>>
+! CHECK: %[[V_6:[0-9]+]] = fir.load %[[V_0]] : !fir.ref<!fir.class<!fir.ptr<none>>>
+! CHECK: %[[V_7:[0-9]+]] = fir.load %[[V_3]] : !fir.ref<!fir.class<!fir.ptr<none>>>
+! CHECK: %[[V_8:[0-9]+]] = fir.load %[[I]] : !fir.ref<i32>
+! CHECK: %[[C1:.*]] = arith.constant 1 : i32
+! CHECK: %[[V_9:[0-9]+]] = arith.cmpi eq, %[[V_8]], %[[C1]] : i32
+! CHECK: %[[V_10:[0-9]+]] = arith.select %[[V_9]], %[[V_6]], %[[V_7]] : !fir.class<!fir.ptr<none>>
+! CHECK: %[[V_11:[0-9]+]] = fir.rebox %[[V_10]] : (!fir.class<!fir.ptr<none>>) -> !fir.class<none>
+! CHECK: fir.call @_QMpoly_tmpPcheck_unlimited_poly(%[[V_11]]) fastmath<contract> : (!fir.class<none>) -> ()
+
end module
diff --git a/flang/test/Transforms/OpenMP/lower-workdistribute-doloop.mlir b/flang/test/Transforms/OpenMP/lower-workdistribute-doloop.mlir
new file mode 100644
index 0000000..00d10d6
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workdistribute-doloop.mlir
@@ -0,0 +1,33 @@
+// RUN: fir-opt --lower-workdistribute %s | FileCheck %s
+
+// CHECK-LABEL: func.func @x({{.*}})
+// CHECK: omp.teams {
+// CHECK: omp.parallel {
+// CHECK: omp.distribute {
+// CHECK: omp.wsloop {
+// CHECK: omp.loop_nest (%[[VAL_1:.*]]) : index = (%[[ARG0:.*]]) to (%[[ARG1:.*]]) inclusive step (%[[ARG2:.*]]) {
+// CHECK: %[[VAL_0:.*]] = arith.constant 0 : index
+// CHECK: fir.store %[[VAL_0]] to %[[ARG4:.*]] : !fir.ref<index>
+// CHECK: omp.yield
+// CHECK: }
+// CHECK: } {omp.composite}
+// CHECK: } {omp.composite}
+// CHECK: omp.terminator
+// CHECK: } {omp.composite}
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: return
+// CHECK: }
+func.func @x(%lb : index, %ub : index, %step : index, %b : i1, %addr : !fir.ref<index>) {
+ omp.teams {
+ omp.workdistribute {
+ fir.do_loop %iv = %lb to %ub step %step unordered {
+ %zero = arith.constant 0 : index
+ fir.store %zero to %addr : !fir.ref<index>
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
diff --git a/flang/test/Transforms/OpenMP/lower-workdistribute-fission-host.mlir b/flang/test/Transforms/OpenMP/lower-workdistribute-fission-host.mlir
new file mode 100644
index 0000000..04e60ca
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workdistribute-fission-host.mlir
@@ -0,0 +1,117 @@
+// RUN: fir-opt --lower-workdistribute %s | FileCheck %s
+// Test lowering of workdistribute after fission on the host device.
+
+// CHECK-LABEL: func.func @x(
+// CHECK: %[[VAL_0:.*]] = fir.alloca index {bindc_name = "lb"}
+// CHECK: fir.store %[[ARG0:.*]] to %[[VAL_0]] : !fir.ref<index>
+// CHECK: %[[VAL_1:.*]] = fir.alloca index {bindc_name = "ub"}
+// CHECK: fir.store %[[ARG1:.*]] to %[[VAL_1]] : !fir.ref<index>
+// CHECK: %[[VAL_2:.*]] = fir.alloca index {bindc_name = "step"}
+// CHECK: fir.store %[[ARG2:.*]] to %[[VAL_2]] : !fir.ref<index>
+// CHECK: %[[VAL_3:.*]] = omp.map.info var_ptr(%[[VAL_0]] : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "lb"}
+// CHECK: %[[VAL_4:.*]] = omp.map.info var_ptr(%[[VAL_1]] : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "ub"}
+// CHECK: %[[VAL_5:.*]] = omp.map.info var_ptr(%[[VAL_2]] : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "step"}
+// CHECK: %[[VAL_6:.*]] = omp.map.info var_ptr(%[[ARG3:.*]] : !fir.ref<index>, index) map_clauses(tofrom) capture(ByRef) -> !fir.ref<index> {name = "addr"}
+// CHECK: %[[VAL_7:.*]] = omp.map.info var_ptr(%[[VAL_0]] : !fir.ref<index>, index) map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> !fir.ref<index> {name = "lb"}
+// CHECK: %[[VAL_8:.*]] = omp.map.info var_ptr(%[[VAL_1]] : !fir.ref<index>, index) map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> !fir.ref<index> {name = "ub"}
+// CHECK: %[[VAL_9:.*]] = omp.map.info var_ptr(%[[VAL_2]] : !fir.ref<index>, index) map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> !fir.ref<index> {name = "step"}
+// CHECK: %[[VAL_10:.*]] = omp.map.info var_ptr(%[[ARG3]] : !fir.ref<index>, index) map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> !fir.ref<index> {name = "addr"}
+// CHECK: omp.target_data map_entries(%[[VAL_3]], %[[VAL_4]], %[[VAL_5]], %[[VAL_6]] : !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<index>) {
+// CHECK: %[[VAL_11:.*]] = fir.alloca index
+// CHECK: %[[VAL_12:.*]] = omp.map.info var_ptr(%[[VAL_11]] : !fir.ref<index>, index) map_clauses(from) capture(ByRef) -> !fir.ref<index> {name = "__flang_workdistribute_from"}
+// CHECK: %[[VAL_13:.*]] = omp.map.info var_ptr(%[[VAL_11]] : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "__flang_workdistribute_to"}
+// CHECK: %[[VAL_14:.*]] = fir.alloca index
+// CHECK: %[[VAL_15:.*]] = omp.map.info var_ptr(%[[VAL_14]] : !fir.ref<index>, index) map_clauses(from) capture(ByRef) -> !fir.ref<index> {name = "__flang_workdistribute_from"}
+// CHECK: %[[VAL_16:.*]] = omp.map.info var_ptr(%[[VAL_14]] : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "__flang_workdistribute_to"}
+// CHECK: %[[VAL_17:.*]] = fir.alloca index
+// CHECK: %[[VAL_18:.*]] = omp.map.info var_ptr(%[[VAL_17]] : !fir.ref<index>, index) map_clauses(from) capture(ByRef) -> !fir.ref<index> {name = "__flang_workdistribute_from"}
+// CHECK: %[[VAL_19:.*]] = omp.map.info var_ptr(%[[VAL_17]] : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "__flang_workdistribute_to"}
+// CHECK: %[[VAL_20:.*]] = fir.alloca !fir.heap<index>
+// CHECK: %[[VAL_21:.*]] = omp.map.info var_ptr(%[[VAL_20]] : !fir.ref<!fir.heap<index>>, !fir.heap<index>) map_clauses(from) capture(ByRef) -> !fir.ref<!fir.heap<index>> {name = "__flang_workdistribute_from"}
+// CHECK: %[[VAL_22:.*]] = omp.map.info var_ptr(%[[VAL_20]] : !fir.ref<!fir.heap<index>>, !fir.heap<index>) map_clauses(to) capture(ByRef) -> !fir.ref<!fir.heap<index>> {name = "__flang_workdistribute_to"}
+// CHECK: %[[VAL_23:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_24:.*]] = fir.load %[[VAL_0]] : !fir.ref<index>
+// CHECK: %[[VAL_25:.*]] = fir.load %[[VAL_1]] : !fir.ref<index>
+// CHECK: %[[VAL_26:.*]] = fir.load %[[VAL_2]] : !fir.ref<index>
+// CHECK: %[[VAL_27:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_28:.*]] = arith.addi %[[VAL_25]], %[[VAL_25]] : index
+// CHECK: %[[VAL_29:.*]] = omp.target_allocmem %[[VAL_23]] : i32, index, %[[VAL_27]] {uniq_name = "dev_buf"}
+// CHECK: %[[VAL_30:.*]] = fir.convert %[[VAL_29]] : (i64) -> !fir.heap<index>
+// CHECK: fir.store %[[VAL_24]] to %[[VAL_11]] : !fir.ref<index>
+// CHECK: fir.store %[[VAL_25]] to %[[VAL_14]] : !fir.ref<index>
+// CHECK: fir.store %[[VAL_26]] to %[[VAL_17]] : !fir.ref<index>
+// CHECK: fir.store %[[VAL_30]] to %[[VAL_20]] : !fir.ref<!fir.heap<index>>
+// CHECK: omp.target host_eval(%[[VAL_24]] -> %[[VAL_31:.*]], %[[VAL_25]] -> %[[VAL_32:.*]], %[[VAL_26]] -> %[[VAL_33:.*]] : index, index, index) map_entries(%[[VAL_7]] -> %[[VAL_34:.*]], %[[VAL_8]] -> %[[VAL_35:.*]], %[[VAL_9]] -> %[[VAL_36:.*]], %[[VAL_10]] -> %[[VAL_37:.*]], %[[VAL_13]] -> %[[VAL_38:.*]], %[[VAL_16]] -> %[[VAL_39:.*]], %[[VAL_19]] -> %[[VAL_40:.*]], %[[VAL_22]] -> %[[VAL_41:.*]] : !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<!fir.heap<index>>) {
+// CHECK: %[[VAL_42:.*]] = fir.load %[[VAL_38]] : !fir.ref<index>
+// CHECK: %[[VAL_43:.*]] = fir.load %[[VAL_39]] : !fir.ref<index>
+// CHECK: %[[VAL_44:.*]] = fir.load %[[VAL_40]] : !fir.ref<index>
+// CHECK: %[[VAL_45:.*]] = fir.load %[[VAL_41]] : !fir.ref<!fir.heap<index>>
+// CHECK: %[[VAL_46:.*]] = arith.addi %[[VAL_43]], %[[VAL_43]] : index
+// CHECK: omp.teams {
+// CHECK: omp.parallel {
+// CHECK: omp.distribute {
+// CHECK: omp.wsloop {
+// CHECK: omp.loop_nest (%[[VAL_47:.*]]) : index = (%[[VAL_31]]) to (%[[VAL_32]]) inclusive step (%[[VAL_33]]) {
+// CHECK: fir.store %[[VAL_46]] to %[[VAL_45]] : !fir.heap<index>
+// CHECK: omp.yield
+// CHECK: }
+// CHECK: } {omp.composite}
+// CHECK: } {omp.composite}
+// CHECK: omp.terminator
+// CHECK: } {omp.composite}
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: %[[VAL_48:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_49:.*]] = fir.load %[[VAL_11]] : !fir.ref<index>
+// CHECK: %[[VAL_50:.*]] = fir.load %[[VAL_14]] : !fir.ref<index>
+// CHECK: %[[VAL_51:.*]] = fir.load %[[VAL_17]] : !fir.ref<index>
+// CHECK: %[[VAL_52:.*]] = fir.load %[[VAL_20]] : !fir.ref<!fir.heap<index>>
+// CHECK: %[[VAL_53:.*]] = arith.addi %[[VAL_50]], %[[VAL_50]] : index
+// CHECK: fir.store %[[VAL_49]] to %[[VAL_52]] : !fir.heap<index>
+// CHECK: %[[VAL_54:.*]] = fir.convert %[[VAL_52]] : (!fir.heap<index>) -> i64
+// CHECK: omp.target_freemem %[[VAL_48]], %[[VAL_54]] : i32, i64
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: return
+// CHECK: }
+
+module attributes {llvm.target_triple = "x86_64-unknown-linux-gnu", omp.is_gpu = false, omp.is_target_device = false} {
+func.func @x(%lb : index, %ub : index, %step : index, %addr : !fir.ref<index>) {
+ %lb_ref = fir.alloca index {bindc_name = "lb"}
+ fir.store %lb to %lb_ref : !fir.ref<index>
+ %ub_ref = fir.alloca index {bindc_name = "ub"}
+ fir.store %ub to %ub_ref : !fir.ref<index>
+ %step_ref = fir.alloca index {bindc_name = "step"}
+ fir.store %step to %step_ref : !fir.ref<index>
+
+ %lb_map = omp.map.info var_ptr(%lb_ref : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "lb"}
+ %ub_map = omp.map.info var_ptr(%ub_ref : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "ub"}
+ %step_map = omp.map.info var_ptr(%step_ref : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "step"}
+ %addr_map = omp.map.info var_ptr(%addr : !fir.ref<index>, index) map_clauses(tofrom) capture(ByRef) -> !fir.ref<index> {name = "addr"}
+
+ omp.target map_entries(%lb_map -> %ARG0, %ub_map -> %ARG1, %step_map -> %ARG2, %addr_map -> %ARG3 : !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<index>) {
+ %lb_val = fir.load %ARG0 : !fir.ref<index>
+ %ub_val = fir.load %ARG1 : !fir.ref<index>
+ %step_val = fir.load %ARG2 : !fir.ref<index>
+ %one = arith.constant 1 : index
+
+ %20 = arith.addi %ub_val, %ub_val : index
+ omp.teams {
+ omp.workdistribute {
+ %dev_mem = fir.allocmem index, %one {uniq_name = "dev_buf"}
+ fir.do_loop %iv = %lb_val to %ub_val step %step_val unordered {
+ fir.store %20 to %dev_mem : !fir.heap<index>
+ }
+ fir.store %lb_val to %dev_mem : !fir.heap<index>
+ fir.freemem %dev_mem : !fir.heap<index>
+ omp.terminator
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
+}
diff --git a/flang/test/Transforms/OpenMP/lower-workdistribute-fission-target.mlir b/flang/test/Transforms/OpenMP/lower-workdistribute-fission-target.mlir
new file mode 100644
index 0000000..062eb70
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workdistribute-fission-target.mlir
@@ -0,0 +1,118 @@
+// RUN: fir-opt --lower-workdistribute %s | FileCheck %s
+// Test lowering of workdistribute after fission on the target device.
+
+// CHECK-LABEL: func.func @x(
+// CHECK: %[[VAL_0:.*]] = fir.alloca index {bindc_name = "lb"}
+// CHECK: fir.store %[[ARG0:.*]] to %[[VAL_0]] : !fir.ref<index>
+// CHECK: %[[VAL_1:.*]] = fir.alloca index {bindc_name = "ub"}
+// CHECK: fir.store %[[ARG1:.*]] to %[[VAL_1]] : !fir.ref<index>
+// CHECK: %[[VAL_2:.*]] = fir.alloca index {bindc_name = "step"}
+// CHECK: fir.store %[[ARG2:.*]] to %[[VAL_2]] : !fir.ref<index>
+// CHECK: %[[VAL_3:.*]] = omp.map.info var_ptr(%[[VAL_0]] : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "lb"}
+// CHECK: %[[VAL_4:.*]] = omp.map.info var_ptr(%[[VAL_1]] : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "ub"}
+// CHECK: %[[VAL_5:.*]] = omp.map.info var_ptr(%[[VAL_2]] : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "step"}
+// CHECK: %[[VAL_6:.*]] = omp.map.info var_ptr(%[[ARG3:.*]] : !fir.ref<index>, index) map_clauses(tofrom) capture(ByRef) -> !fir.ref<index> {name = "addr"}
+// CHECK: %[[VAL_7:.*]] = omp.map.info var_ptr(%[[VAL_0]] : !fir.ref<index>, index) map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> !fir.ref<index> {name = "lb"}
+// CHECK: %[[VAL_8:.*]] = omp.map.info var_ptr(%[[VAL_1]] : !fir.ref<index>, index) map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> !fir.ref<index> {name = "ub"}
+// CHECK: %[[VAL_9:.*]] = omp.map.info var_ptr(%[[VAL_2]] : !fir.ref<index>, index) map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> !fir.ref<index> {name = "step"}
+// CHECK: %[[VAL_10:.*]] = omp.map.info var_ptr(%[[ARG3]] : !fir.ref<index>, index) map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> !fir.ref<index> {name = "addr"}
+// CHECK: omp.target_data map_entries(%[[VAL_3]], %[[VAL_4]], %[[VAL_5]], %[[VAL_6]] : !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<index>) {
+// CHECK: %[[VAL_11:.*]] = fir.alloca index
+// CHECK: %[[VAL_12:.*]] = omp.map.info var_ptr(%[[VAL_11]] : !fir.ref<index>, index) map_clauses(from) capture(ByRef) -> !fir.ref<index> {name = "__flang_workdistribute_from"}
+// CHECK: %[[VAL_13:.*]] = omp.map.info var_ptr(%[[VAL_11]] : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "__flang_workdistribute_to"}
+// CHECK: %[[VAL_14:.*]] = fir.alloca index
+// CHECK: %[[VAL_15:.*]] = omp.map.info var_ptr(%[[VAL_14]] : !fir.ref<index>, index) map_clauses(from) capture(ByRef) -> !fir.ref<index> {name = "__flang_workdistribute_from"}
+// CHECK: %[[VAL_16:.*]] = omp.map.info var_ptr(%[[VAL_14]] : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "__flang_workdistribute_to"}
+// CHECK: %[[VAL_17:.*]] = fir.alloca index
+// CHECK: %[[VAL_18:.*]] = omp.map.info var_ptr(%[[VAL_17]] : !fir.ref<index>, index) map_clauses(from) capture(ByRef) -> !fir.ref<index> {name = "__flang_workdistribute_from"}
+// CHECK: %[[VAL_19:.*]] = omp.map.info var_ptr(%[[VAL_17]] : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "__flang_workdistribute_to"}
+// CHECK: %[[VAL_20:.*]] = fir.alloca !fir.heap<index>
+// CHECK: %[[VAL_21:.*]] = omp.map.info var_ptr(%[[VAL_20]] : !fir.ref<!fir.heap<index>>, !fir.heap<index>) map_clauses(from) capture(ByRef) -> !fir.ref<!fir.heap<index>> {name = "__flang_workdistribute_from"}
+// CHECK: %[[VAL_22:.*]] = omp.map.info var_ptr(%[[VAL_20]] : !fir.ref<!fir.heap<index>>, !fir.heap<index>) map_clauses(to) capture(ByRef) -> !fir.ref<!fir.heap<index>> {name = "__flang_workdistribute_to"}
+// CHECK: %[[VAL_23:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_24:.*]] = fir.load %[[VAL_0]] : !fir.ref<index>
+// CHECK: %[[VAL_25:.*]] = fir.load %[[VAL_1]] : !fir.ref<index>
+// CHECK: %[[VAL_26:.*]] = fir.load %[[VAL_2]] : !fir.ref<index>
+// CHECK: %[[VAL_27:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_28:.*]] = arith.addi %[[VAL_25]], %[[VAL_25]] : index
+// CHECK: %[[VAL_29:.*]] = omp.target_allocmem %[[VAL_23]] : i32, index, %[[VAL_27]] {uniq_name = "dev_buf"}
+// CHECK: %[[VAL_30:.*]] = fir.convert %[[VAL_29]] : (i64) -> !fir.heap<index>
+// CHECK: fir.store %[[VAL_24]] to %[[VAL_11]] : !fir.ref<index>
+// CHECK: fir.store %[[VAL_25]] to %[[VAL_14]] : !fir.ref<index>
+// CHECK: fir.store %[[VAL_26]] to %[[VAL_17]] : !fir.ref<index>
+// CHECK: fir.store %[[VAL_30]] to %[[VAL_20]] : !fir.ref<!fir.heap<index>>
+// CHECK: omp.target map_entries(%[[VAL_7]] -> %[[VAL_31:.*]], %[[VAL_8]] -> %[[VAL_32:.*]], %[[VAL_9]] -> %[[VAL_33:.*]], %[[VAL_10]] -> %[[VAL_34:.*]], %[[VAL_13]] -> %[[VAL_35:.*]], %[[VAL_16]] -> %[[VAL_36:.*]], %[[VAL_19]] -> %[[VAL_37:.*]], %[[VAL_22]] -> %[[VAL_38:.*]] : !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<!fir.heap<index>>) {
+// CHECK: %[[VAL_39:.*]] = fir.load %[[VAL_35]] : !fir.ref<index>
+// CHECK: %[[VAL_40:.*]] = fir.load %[[VAL_36]] : !fir.ref<index>
+// CHECK: %[[VAL_41:.*]] = fir.load %[[VAL_37]] : !fir.ref<index>
+// CHECK: %[[VAL_42:.*]] = fir.load %[[VAL_38]] : !fir.ref<!fir.heap<index>>
+// CHECK: %[[VAL_43:.*]] = arith.addi %[[VAL_40]], %[[VAL_40]] : index
+// CHECK: omp.teams {
+// CHECK: omp.parallel {
+// CHECK: omp.distribute {
+// CHECK: omp.wsloop {
+// CHECK: omp.loop_nest (%[[VAL_44:.*]]) : index = (%[[VAL_39]]) to (%[[VAL_40]]) inclusive step (%[[VAL_41]]) {
+// CHECK: fir.store %[[VAL_43]] to %[[VAL_42]] : !fir.heap<index>
+// CHECK: omp.yield
+// CHECK: }
+// CHECK: } {omp.composite}
+// CHECK: } {omp.composite}
+// CHECK: omp.terminator
+// CHECK: } {omp.composite}
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: %[[VAL_45:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_46:.*]] = fir.load %[[VAL_11]] : !fir.ref<index>
+// CHECK: %[[VAL_47:.*]] = fir.load %[[VAL_14]] : !fir.ref<index>
+// CHECK: %[[VAL_48:.*]] = fir.load %[[VAL_17]] : !fir.ref<index>
+// CHECK: %[[VAL_49:.*]] = fir.load %[[VAL_20]] : !fir.ref<!fir.heap<index>>
+// CHECK: %[[VAL_50:.*]] = arith.addi %[[VAL_47]], %[[VAL_47]] : index
+// CHECK: fir.store %[[VAL_46]] to %[[VAL_49]] : !fir.heap<index>
+// CHECK: %[[VAL_51:.*]] = fir.convert %[[VAL_49]] : (!fir.heap<index>) -> i64
+// CHECK: omp.target_freemem %[[VAL_45]], %[[VAL_51]] : i32, i64
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: return
+// CHECK: }
+
+
+module attributes {llvm.target_triple = "amdgcn-amd-amdhsa", omp.is_gpu = true, omp.is_target_device = true} {
+func.func @x(%lb : index, %ub : index, %step : index, %addr : !fir.ref<index>) {
+ %lb_ref = fir.alloca index {bindc_name = "lb"}
+ fir.store %lb to %lb_ref : !fir.ref<index>
+ %ub_ref = fir.alloca index {bindc_name = "ub"}
+ fir.store %ub to %ub_ref : !fir.ref<index>
+ %step_ref = fir.alloca index {bindc_name = "step"}
+ fir.store %step to %step_ref : !fir.ref<index>
+
+ %lb_map = omp.map.info var_ptr(%lb_ref : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "lb"}
+ %ub_map = omp.map.info var_ptr(%ub_ref : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "ub"}
+ %step_map = omp.map.info var_ptr(%step_ref : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "step"}
+ %addr_map = omp.map.info var_ptr(%addr : !fir.ref<index>, index) map_clauses(tofrom) capture(ByRef) -> !fir.ref<index> {name = "addr"}
+
+ omp.target map_entries(%lb_map -> %ARG0, %ub_map -> %ARG1, %step_map -> %ARG2, %addr_map -> %ARG3 : !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<index>) {
+ %lb_val = fir.load %ARG0 : !fir.ref<index>
+ %ub_val = fir.load %ARG1 : !fir.ref<index>
+ %step_val = fir.load %ARG2 : !fir.ref<index>
+ %one = arith.constant 1 : index
+
+ %20 = arith.addi %ub_val, %ub_val : index
+ omp.teams {
+ omp.workdistribute {
+ %dev_mem = fir.allocmem index, %one {uniq_name = "dev_buf"}
+ fir.do_loop %iv = %lb_val to %ub_val step %step_val unordered {
+ fir.store %20 to %dev_mem : !fir.heap<index>
+ }
+ fir.store %lb_val to %dev_mem : !fir.heap<index>
+ fir.freemem %dev_mem : !fir.heap<index>
+ omp.terminator
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
+}
diff --git a/flang/test/Transforms/OpenMP/lower-workdistribute-fission.mlir b/flang/test/Transforms/OpenMP/lower-workdistribute-fission.mlir
new file mode 100644
index 0000000..c562b70
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workdistribute-fission.mlir
@@ -0,0 +1,71 @@
+// RUN: fir-opt --lower-workdistribute %s | FileCheck %s
+
+// CHECK-LABEL: func.func @test_fission_workdistribute(
+// CHECK: %[[VAL_0:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_1:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_2:.*]] = arith.constant 9 : index
+// CHECK: %[[VAL_3:.*]] = arith.constant 5.000000e+00 : f32
+// CHECK: fir.store %[[VAL_3]] to %[[ARG2:.*]] : !fir.ref<f32>
+// CHECK: omp.teams {
+// CHECK: omp.parallel {
+// CHECK: omp.distribute {
+// CHECK: omp.wsloop {
+// CHECK: omp.loop_nest (%[[VAL_4:.*]]) : index = (%[[VAL_0]]) to (%[[VAL_2]]) inclusive step (%[[VAL_1]]) {
+// CHECK: %[[VAL_5:.*]] = fir.coordinate_of %[[ARG0:.*]], %[[VAL_4]] : (!fir.ref<!fir.array<10xf32>>, index) -> !fir.ref<f32>
+// CHECK: %[[VAL_6:.*]] = fir.load %[[VAL_5]] : !fir.ref<f32>
+// CHECK: %[[VAL_7:.*]] = fir.coordinate_of %[[ARG1:.*]], %[[VAL_4]] : (!fir.ref<!fir.array<10xf32>>, index) -> !fir.ref<f32>
+// CHECK: fir.store %[[VAL_6]] to %[[VAL_7]] : !fir.ref<f32>
+// CHECK: omp.yield
+// CHECK: }
+// CHECK: } {omp.composite}
+// CHECK: } {omp.composite}
+// CHECK: omp.terminator
+// CHECK: } {omp.composite}
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: fir.call @regular_side_effect_func(%[[ARG2:.*]]) : (!fir.ref<f32>) -> ()
+// CHECK: fir.call @my_fir_parallel_runtime_func(%[[ARG3:.*]]) : (!fir.ref<f32>) -> ()
+// CHECK: fir.do_loop %[[VAL_8:.*]] = %[[VAL_0]] to %[[VAL_2]] step %[[VAL_1]] {
+// CHECK: %[[VAL_9:.*]] = fir.coordinate_of %[[ARG0]], %[[VAL_8]] : (!fir.ref<!fir.array<10xf32>>, index) -> !fir.ref<f32>
+// CHECK: fir.store %[[VAL_3]] to %[[VAL_9]] : !fir.ref<f32>
+// CHECK: }
+// CHECK: %[[VAL_10:.*]] = fir.load %[[ARG2:.*]] : !fir.ref<f32>
+// CHECK: fir.store %[[VAL_10]] to %[[ARG3:.*]] : !fir.ref<f32>
+// CHECK: return
+// CHECK: }
+module {
+func.func @regular_side_effect_func(%arg0: !fir.ref<f32>) {
+ return
+}
+func.func @my_fir_parallel_runtime_func(%arg0: !fir.ref<f32>) attributes {fir.runtime} {
+ return
+}
+func.func @test_fission_workdistribute(%arr1: !fir.ref<!fir.array<10xf32>>, %arr2: !fir.ref<!fir.array<10xf32>>, %scalar_ref1: !fir.ref<f32>, %scalar_ref2: !fir.ref<f32>) {
+ %c0_idx = arith.constant 0 : index
+ %c1_idx = arith.constant 1 : index
+ %c9_idx = arith.constant 9 : index
+ %float_val = arith.constant 5.0 : f32
+ omp.teams {
+ omp.workdistribute {
+ fir.store %float_val to %scalar_ref1 : !fir.ref<f32>
+ fir.do_loop %iv = %c0_idx to %c9_idx step %c1_idx unordered {
+ %elem_ptr_arr1 = fir.coordinate_of %arr1, %iv : (!fir.ref<!fir.array<10xf32>>, index) -> !fir.ref<f32>
+ %loaded_val_loop1 = fir.load %elem_ptr_arr1 : !fir.ref<f32>
+ %elem_ptr_arr2 = fir.coordinate_of %arr2, %iv : (!fir.ref<!fir.array<10xf32>>, index) -> !fir.ref<f32>
+ fir.store %loaded_val_loop1 to %elem_ptr_arr2 : !fir.ref<f32>
+ }
+ fir.call @regular_side_effect_func(%scalar_ref1) : (!fir.ref<f32>) -> ()
+ fir.call @my_fir_parallel_runtime_func(%scalar_ref2) : (!fir.ref<f32>) -> ()
+ fir.do_loop %jv = %c0_idx to %c9_idx step %c1_idx {
+ %elem_ptr_ordered_loop = fir.coordinate_of %arr1, %jv : (!fir.ref<!fir.array<10xf32>>, index) -> !fir.ref<f32>
+ fir.store %float_val to %elem_ptr_ordered_loop : !fir.ref<f32>
+ }
+ %loaded_for_hoist = fir.load %scalar_ref1 : !fir.ref<f32>
+ fir.store %loaded_for_hoist to %scalar_ref2 : !fir.ref<f32>
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
+}
diff --git a/flang/test/Transforms/OpenMP/lower-workdistribute-runtime-assign-scalar.mlir b/flang/test/Transforms/OpenMP/lower-workdistribute-runtime-assign-scalar.mlir
new file mode 100644
index 0000000..03d5d71
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workdistribute-runtime-assign-scalar.mlir
@@ -0,0 +1,108 @@
+// RUN: fir-opt --lower-workdistribute %s | FileCheck %s
+
+// Test lowering of workdistribute for a scalar assignment within a target teams workdistribute region.
+// The test checks that the scalar assignment is correctly lowered to wsloop and loop_nest operations.
+
+// Example Fortran code:
+// !$omp target teams workdistribute
+// y = 3.0_real32
+// !$omp end target teams workdistribute
+
+
+// CHECK-LABEL: func.func @x(
+// CHECK: omp.target {{.*}} {
+// CHECK: omp.teams {
+// CHECK: omp.parallel {
+// CHECK: omp.distribute {
+// CHECK: omp.wsloop {
+// CHECK: omp.loop_nest (%[[VAL_73:.*]]) : index = (%[[VAL_66:.*]]) to (%[[VAL_72:.*]]) inclusive step (%[[VAL_67:.*]]) {
+// CHECK: %[[VAL_74:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_75:.*]]:3 = fir.box_dims %[[VAL_64:.*]], %[[VAL_74]] : (!fir.box<!fir.array<?x?xf32>>, index) -> (index, index, index)
+// CHECK: %[[VAL_76:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_77:.*]]:3 = fir.box_dims %[[VAL_64]], %[[VAL_76]] : (!fir.box<!fir.array<?x?xf32>>, index) -> (index, index, index)
+// CHECK: %[[VAL_78:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_79:.*]] = arith.remsi %[[VAL_73]], %[[VAL_77]]#1 : index
+// CHECK: %[[VAL_80:.*]] = arith.addi %[[VAL_79]], %[[VAL_78]] : index
+// CHECK: %[[VAL_81:.*]] = arith.divsi %[[VAL_73]], %[[VAL_77]]#1 : index
+// CHECK: %[[VAL_82:.*]] = arith.remsi %[[VAL_81]], %[[VAL_75]]#1 : index
+// CHECK: %[[VAL_83:.*]] = arith.addi %[[VAL_82]], %[[VAL_78]] : index
+// CHECK: %[[VAL_84:.*]] = fir.array_coor %[[VAL_64]] %[[VAL_83]], %[[VAL_80]] : (!fir.box<!fir.array<?x?xf32>>, index, index) -> !fir.ref<f32>
+// CHECK: fir.store %[[VAL_65:.*]] to %[[VAL_84]] : !fir.ref<f32>
+// CHECK: omp.yield
+// CHECK: }
+// CHECK: } {omp.composite}
+// CHECK: } {omp.composite}
+// CHECK: omp.terminator
+// CHECK: } {omp.composite}
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: return
+// CHECK: }
+// CHECK: func.func private @_FortranAAssign(!fir.ref<!fir.box<none>>, !fir.box<none>, !fir.ref<i8>, i32) attributes {fir.runtime}
+
+module attributes {llvm.target_triple = "amdgcn-amd-amdhsa", omp.is_gpu = true, omp.is_target_device = true} {
+func.func @x(%arr : !fir.ref<!fir.array<?x?xf32>>) {
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %c78 = arith.constant 78 : index
+ %cst = arith.constant 3.000000e+00 : f32
+ %0 = fir.alloca i32
+ %1 = fir.alloca i32
+ %c10 = arith.constant 10 : index
+ %c20 = arith.constant 20 : index
+ %194 = arith.subi %c10, %c1 : index
+ %195 = omp.map.bounds lower_bound(%c0 : index) upper_bound(%194 : index) extent(%c10 : index) stride(%c1 : index) start_idx(%c1 : index)
+ %196 = arith.subi %c20, %c1 : index
+ %197 = omp.map.bounds lower_bound(%c0 : index) upper_bound(%196 : index) extent(%c20 : index) stride(%c1 : index) start_idx(%c1 : index)
+ %198 = omp.map.info var_ptr(%arr : !fir.ref<!fir.array<?x?xf32>>, f32) map_clauses(implicit, tofrom) capture(ByRef) bounds(%195, %197) -> !fir.ref<!fir.array<?x?xf32>> {name = "y"}
+ %199 = omp.map.info var_ptr(%1 : !fir.ref<i32>, i32) map_clauses(implicit, exit_release_or_enter_alloc) capture(ByCopy) -> !fir.ref<i32> {name = ""}
+ %200 = omp.map.info var_ptr(%0 : !fir.ref<i32>, i32) map_clauses(implicit, exit_release_or_enter_alloc) capture(ByCopy) -> !fir.ref<i32> {name = ""}
+ omp.target map_entries(%198 -> %arg5, %199 -> %arg6, %200 -> %arg7 : !fir.ref<!fir.array<?x?xf32>>, !fir.ref<i32>, !fir.ref<i32>) {
+ %c0_0 = arith.constant 0 : index
+ %201 = fir.load %arg7 : !fir.ref<i32>
+ %202 = fir.load %arg6 : !fir.ref<i32>
+ %203 = fir.convert %202 : (i32) -> i64
+ %204 = fir.convert %201 : (i32) -> i64
+ %205 = fir.convert %204 : (i64) -> index
+ %206 = arith.cmpi sgt, %205, %c0_0 : index
+ %207 = fir.convert %203 : (i64) -> index
+ %208 = arith.cmpi sgt, %207, %c0_0 : index
+ %209 = arith.select %208, %207, %c0_0 : index
+ %210 = arith.select %206, %205, %c0_0 : index
+ %211 = fir.shape %210, %209 : (index, index) -> !fir.shape<2>
+ %212 = fir.declare %arg5(%211) {uniq_name = "_QFFaxpy_array_workdistributeEy"} : (!fir.ref<!fir.array<?x?xf32>>, !fir.shape<2>) -> !fir.ref<!fir.array<?x?xf32>>
+ %213 = fir.embox %212(%211) : (!fir.ref<!fir.array<?x?xf32>>, !fir.shape<2>) -> !fir.box<!fir.array<?x?xf32>>
+ omp.teams {
+ %214 = fir.alloca !fir.box<!fir.array<?x?xf32>> {pinned}
+ omp.workdistribute {
+ %215 = fir.alloca f32
+ %216 = fir.embox %215 : (!fir.ref<f32>) -> !fir.box<f32>
+ %217 = fir.shape %210, %209 : (index, index) -> !fir.shape<2>
+ %218 = fir.embox %212(%217) : (!fir.ref<!fir.array<?x?xf32>>, !fir.shape<2>) -> !fir.box<!fir.array<?x?xf32>>
+ fir.store %218 to %214 : !fir.ref<!fir.box<!fir.array<?x?xf32>>>
+ %219 = fir.address_of(@_QQclXf9c642d28e5bba1f07fa9a090b72f4fc) : !fir.ref<!fir.char<1,78>>
+ %c39_i32 = arith.constant 39 : i32
+ %220 = fir.convert %214 : (!fir.ref<!fir.box<!fir.array<?x?xf32>>>) -> !fir.ref<!fir.box<none>>
+ %221 = fir.convert %216 : (!fir.box<f32>) -> !fir.box<none>
+ %222 = fir.convert %219 : (!fir.ref<!fir.char<1,78>>) -> !fir.ref<i8>
+ fir.call @_FortranAAssign(%220, %221, %222, %c39_i32) : (!fir.ref<!fir.box<none>>, !fir.box<none>, !fir.ref<i8>, i32) -> ()
+ omp.terminator
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
+
+func.func private @_FortranAAssign(!fir.ref<!fir.box<none>>, !fir.box<none>, !fir.ref<i8>, i32) attributes {fir.runtime}
+
+fir.global linkonce @_QQclXf9c642d28e5bba1f07fa9a090b72f4fc constant : !fir.char<1,78> {
+ %0 = fir.string_lit "File: /work/github/skc7/llvm-project/build_fomp_reldebinfo/saxpy_tests/\00"(78) : !fir.char<1,78>
+ fir.has_value %0 : !fir.char<1,78>
+}
+}
diff --git a/flang/tools/fir-opt/CMakeLists.txt b/flang/tools/fir-opt/CMakeLists.txt
index c5bd439..82178c2 100644
--- a/flang/tools/fir-opt/CMakeLists.txt
+++ b/flang/tools/fir-opt/CMakeLists.txt
@@ -7,6 +7,7 @@ if(FLANG_INCLUDE_TESTS)
set(test_libs
FIRTestAnalysis
FIRTestOpenACCInterfaces
+ MLIROpenACCTestPasses
MLIRTestIR
)
endif()
diff --git a/flang/tools/fir-opt/fir-opt.cpp b/flang/tools/fir-opt/fir-opt.cpp
index b0b277b..32b0a1d 100644
--- a/flang/tools/fir-opt/fir-opt.cpp
+++ b/flang/tools/fir-opt/fir-opt.cpp
@@ -30,7 +30,10 @@ void registerTestFIROpenACCInterfacesPass();
// Defined in mlir/test, no public header.
namespace mlir {
void registerSideEffectTestPasses();
-}
+namespace test {
+void registerTestOpenACC();
+} // namespace test
+} // namespace mlir
int main(int argc, char **argv) {
fir::support::registerMLIRPassesForFortranTools();
@@ -43,6 +46,7 @@ int main(int argc, char **argv) {
fir::test::registerTestFIRAliasAnalysisPass();
fir::test::registerTestFIROpenACCInterfacesPass();
mlir::registerSideEffectTestPasses();
+ mlir::test::registerTestOpenACC();
#endif
DialectRegistry registry;
fir::support::registerDialects(registry);
diff --git a/libc/include/llvm-libc-macros/netinet-in-macros.h b/libc/include/llvm-libc-macros/netinet-in-macros.h
index c05e5e2..fb7564ce 100644
--- a/libc/include/llvm-libc-macros/netinet-in-macros.h
+++ b/libc/include/llvm-libc-macros/netinet-in-macros.h
@@ -16,4 +16,12 @@
#define IPPROTO_IPV6 41
#define IPPROTO_RAW 255
+#define IPV6_UNICAST_HOPS 16
+#define IPV6_MULTICAST_IF 17
+#define IPV6_MULTICAST_HOPS 18
+#define IPV6_MULTICAST_LOOP 19
+#define IPV6_JOIN_GROUP 20
+#define IPV6_LEAVE_GROUP 21
+#define IPV6_V6ONLY 26
+
#endif // LLVM_LIBC_MACROS_NETINET_IN_MACROS_H
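The seven IPV6_* socket-option values added above match the ones Linux exposes, so a conforming program can pass them straight to setsockopt(2) at the IPPROTO_IPV6 level. A minimal hedged sketch (plain POSIX sockets, not part of this patch; only the macro values come from the header above) of the most common use, restricting an AF_INET6 socket to IPv6 traffic only:

    // Hedged sketch: assumes a POSIX socket layer is available on the target.
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <unistd.h>
    #include <cstdio>

    int main() {
      int fd = socket(AF_INET6, SOCK_STREAM, 0);
      if (fd < 0) {
        std::perror("socket");
        return 1;
      }
      int on = 1;
      // IPV6_V6ONLY (26) turns off IPv4-mapped addresses on this socket.
      if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on)) != 0)
        std::perror("setsockopt");
      close(fd);
      return 0;
    }
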
diff --git a/libc/shared/math.h b/libc/shared/math.h
index e3f7965..bd6aee7 100644
--- a/libc/shared/math.h
+++ b/libc/shared/math.h
@@ -50,6 +50,7 @@
#include "math/exp2.h"
#include "math/exp2f.h"
#include "math/exp2f16.h"
+#include "math/exp2m1f.h"
#include "math/expf.h"
#include "math/expf16.h"
#include "math/frexpf.h"
diff --git a/libc/shared/math/exp2m1f.h b/libc/shared/math/exp2m1f.h
new file mode 100644
index 0000000..ca97547
--- /dev/null
+++ b/libc/shared/math/exp2m1f.h
@@ -0,0 +1,23 @@
+//===-- Shared exp2m1f function ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SHARED_MATH_EXP2M1F_H
+#define LLVM_LIBC_SHARED_MATH_EXP2M1F_H
+
+#include "shared/libc_common.h"
+#include "src/__support/math/exp2m1f.h"
+
+namespace LIBC_NAMESPACE_DECL {
+namespace shared {
+
+using math::exp2m1f;
+
+} // namespace shared
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SHARED_MATH_EXP2M1F_H
diff --git a/libc/src/__support/math/CMakeLists.txt b/libc/src/__support/math/CMakeLists.txt
index 9685496..47bb328 100644
--- a/libc/src/__support/math/CMakeLists.txt
+++ b/libc/src/__support/math/CMakeLists.txt
@@ -752,6 +752,24 @@ add_header_library(
)
add_header_library(
+ exp2m1f
+ HDRS
+ exp2m1f.h
+ DEPENDS
+ .exp10f_utils
+ libc.src.errno.errno
+ libc.src.__support.common
+ libc.src.__support.FPUtil.except_value_utils
+ libc.src.__support.FPUtil.fenv_impl
+ libc.src.__support.FPUtil.fp_bits
+ libc.src.__support.FPUtil.multiply_add
+ libc.src.__support.FPUtil.polyeval
+ libc.src.__support.FPUtil.rounding_mode
+ libc.src.__support.macros.optimization
+ libc.src.__support.macros.properties.cpu_features
+)
+
+add_header_library(
exp10
HDRS
exp10.h
diff --git a/libc/src/__support/math/exp2m1f.h b/libc/src/__support/math/exp2m1f.h
new file mode 100644
index 0000000..e95076c
--- /dev/null
+++ b/libc/src/__support/math/exp2m1f.h
@@ -0,0 +1,195 @@
+//===-- Implementation header for exp2m1f ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC___SUPPORT_MATH_EXP2M1F_H
+#define LLVM_LIBC_SRC___SUPPORT_MATH_EXP2M1F_H
+
+#include "exp10f_utils.h"
+#include "src/__support/FPUtil/FEnvImpl.h"
+#include "src/__support/FPUtil/FPBits.h"
+#include "src/__support/FPUtil/PolyEval.h"
+#include "src/__support/FPUtil/except_value_utils.h"
+#include "src/__support/FPUtil/multiply_add.h"
+#include "src/__support/FPUtil/rounding_mode.h"
+#include "src/__support/common.h"
+#include "src/__support/libc_errno.h"
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/optimization.h"
+#include "src/__support/macros/properties/cpu_features.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+namespace math {
+
+LIBC_INLINE static constexpr float exp2m1f(float x) {
+#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS
+ constexpr size_t N_EXCEPTS_LO = 8;
+
+ constexpr fputil::ExceptValues<float, N_EXCEPTS_LO> EXP2M1F_EXCEPTS_LO = {{
+ // (input, RZ output, RU offset, RD offset, RN offset)
+ // x = 0x1.36dc8ep-36, exp2m1f(x) = 0x1.aef212p-37 (RZ)
+ {0x2d9b'6e47U, 0x2d57'7909U, 1U, 0U, 0U},
+ // x = 0x1.224936p-19, exp2m1f(x) = 0x1.926c0ep-20 (RZ)
+ {0x3611'249bU, 0x35c9'3607U, 1U, 0U, 1U},
+ // x = 0x1.d16d2p-20, exp2m1f(x) = 0x1.429becp-20 (RZ)
+ {0x35e8'b690U, 0x35a1'4df6U, 1U, 0U, 1U},
+ // x = 0x1.17949ep-14, exp2m1f(x) = 0x1.8397p-15 (RZ)
+ {0x388b'ca4fU, 0x3841'cb80U, 1U, 0U, 1U},
+ // x = -0x1.9c3e1ep-38, exp2m1f(x) = -0x1.1dbeacp-38 (RZ)
+ {0xacce'1f0fU, 0xac8e'df56U, 0U, 1U, 0U},
+ // x = -0x1.4d89b4p-32, exp2m1f(x) = -0x1.ce61b6p-33 (RZ)
+ {0xafa6'c4daU, 0xaf67'30dbU, 0U, 1U, 1U},
+ // x = -0x1.a6eac4p-10, exp2m1f(x) = -0x1.24fadap-10 (RZ)
+ {0xbad3'7562U, 0xba92'7d6dU, 0U, 1U, 1U},
+ // x = -0x1.e7526ep-6, exp2m1f(x) = -0x1.4e53dep-6 (RZ)
+ {0xbcf3'a937U, 0xbca7'29efU, 0U, 1U, 1U},
+ }};
+
+ constexpr size_t N_EXCEPTS_HI = 3;
+
+ constexpr fputil::ExceptValues<float, N_EXCEPTS_HI> EXP2M1F_EXCEPTS_HI = {{
+ // (input, RZ output, RU offset, RD offset, RN offset)
+ // x = 0x1.16a972p-1, exp2m1f(x) = 0x1.d545b2p-2 (RZ)
+ {0x3f0b'54b9U, 0x3eea'a2d9U, 1U, 0U, 0U},
+ // x = -0x1.9f12acp-5, exp2m1f(x) = -0x1.1ab68cp-5 (RZ)
+ {0xbd4f'8956U, 0xbd0d'5b46U, 0U, 1U, 0U},
+ // x = -0x1.de7b9cp-5, exp2m1f(x) = -0x1.4508f4p-5 (RZ)
+ {0xbd6f'3dceU, 0xbd22'847aU, 0U, 1U, 1U},
+ }};
+#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS
+
+ using FPBits = fputil::FPBits<float>;
+ FPBits xbits(x);
+
+ uint32_t x_u = xbits.uintval();
+ uint32_t x_abs = x_u & 0x7fff'ffffU;
+
+ // When |x| >= 128, or x is nan, or |x| <= 2^-5
+ if (LIBC_UNLIKELY(x_abs >= 0x4300'0000U || x_abs <= 0x3d00'0000U)) {
+ // |x| <= 2^-5
+ if (x_abs <= 0x3d00'0000U) {
+#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS
+ if (auto r = EXP2M1F_EXCEPTS_LO.lookup(x_u); LIBC_UNLIKELY(r.has_value()))
+ return r.value();
+#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS
+
+ // Minimax polynomial generated by Sollya with:
+ // > display = hexadecimal;
+ // > fpminimax((2^x - 1)/x, 5, [|D...|], [-2^-5, 2^-5]);
+ constexpr double COEFFS[] = {
+ 0x1.62e42fefa39f3p-1, 0x1.ebfbdff82c57bp-3, 0x1.c6b08d6f2d7aap-5,
+ 0x1.3b2ab6fc92f5dp-7, 0x1.5d897cfe27125p-10, 0x1.43090e61e6af1p-13};
+ double xd = x;
+ double xsq = xd * xd;
+ double c0 = fputil::multiply_add(xd, COEFFS[1], COEFFS[0]);
+ double c1 = fputil::multiply_add(xd, COEFFS[3], COEFFS[2]);
+ double c2 = fputil::multiply_add(xd, COEFFS[5], COEFFS[4]);
+ double p = fputil::polyeval(xsq, c0, c1, c2);
+ return static_cast<float>(p * xd);
+ }
+
+ // x >= 128, or x is nan
+ if (xbits.is_pos()) {
+ if (xbits.is_finite()) {
+ int rounding = fputil::quick_get_round();
+ if (rounding == FE_DOWNWARD || rounding == FE_TOWARDZERO)
+ return FPBits::max_normal().get_val();
+
+ fputil::set_errno_if_required(ERANGE);
+ fputil::raise_except_if_required(FE_OVERFLOW);
+ }
+
+ // x >= 128 and 2^x - 1 rounds to +inf, or x is +inf or nan
+ return x + FPBits::inf().get_val();
+ }
+ }
+
+ if (LIBC_UNLIKELY(x <= -25.0f)) {
+ // 2^(-inf) - 1 = -1
+ if (xbits.is_inf())
+ return -1.0f;
+ // 2^nan - 1 = nan
+ if (xbits.is_nan())
+ return x;
+
+ int rounding = fputil::quick_get_round();
+ if (rounding == FE_UPWARD || rounding == FE_TOWARDZERO)
+ return -0x1.ffff'fep-1f; // -1.0f + 0x1.0p-24f
+
+ fputil::set_errno_if_required(ERANGE);
+ fputil::raise_except_if_required(FE_UNDERFLOW);
+ return -1.0f;
+ }
+
+#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS
+ if (auto r = EXP2M1F_EXCEPTS_HI.lookup(x_u); LIBC_UNLIKELY(r.has_value()))
+ return r.value();
+#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS
+
+ // For -25 < x < 128, to compute 2^x, we perform the following range
+ // reduction: find hi, mid, lo such that:
+ // x = hi + mid + lo, in which:
+ // hi is an integer,
+//      mid * 2^5 is an integer with 0 <= mid * 2^5 < 32,
+ // -2^(-6) <= lo <= 2^(-6).
+ // In particular,
+ // hi + mid = round(x * 2^5) * 2^(-5).
+ // Then,
+ // 2^x = 2^(hi + mid + lo) = 2^hi * 2^mid * 2^lo.
+ // 2^mid is stored in the lookup table of 32 elements.
+ // 2^lo is computed using a degree-4 minimax polynomial generated by Sollya.
+// We perform 2^hi * 2^mid by simply adding hi to the exponent field of 2^mid.
+
+ // kf = (hi + mid) * 2^5 = round(x * 2^5)
+ float kf = 0;
+ int k = 0;
+#ifdef LIBC_TARGET_CPU_HAS_NEAREST_INT
+ kf = fputil::nearest_integer(x * 32.0f);
+ k = static_cast<int>(kf);
+#else
+ constexpr float HALF[2] = {0.5f, -0.5f};
+ k = static_cast<int>(fputil::multiply_add(x, 32.0f, HALF[x < 0.0f]));
+ kf = static_cast<float>(k);
+#endif // LIBC_TARGET_CPU_HAS_NEAREST_INT
+
+ // lo = x - (hi + mid) = x - kf * 2^(-5)
+ double lo = fputil::multiply_add(-0x1.0p-5f, kf, x);
+
+// hi = floor(kf * 2^(-5))
+ // exp2_hi = shift hi to the exponent field of double precision.
+ int64_t exp2_hi =
+ static_cast<int64_t>(static_cast<uint64_t>(k >> ExpBase::MID_BITS)
+ << fputil::FPBits<double>::FRACTION_LEN);
+ // mh = 2^hi * 2^mid
+ // mh_bits = bit field of mh
+ int64_t mh_bits = ExpBase::EXP_2_MID[k & ExpBase::MID_MASK] + exp2_hi;
+ double mh = fputil::FPBits<double>(static_cast<uint64_t>(mh_bits)).get_val();
+
+ // Degree-4 polynomial approximating (2^x - 1)/x generated by Sollya with:
+ // > display = hexadecimal;
+ // > fpminimax((2^x - 1)/x, 4, [|D...|], [-2^-6, 2^-6]);
+ constexpr double COEFFS[5] = {0x1.62e42fefa39efp-1, 0x1.ebfbdff8131c4p-3,
+ 0x1.c6b08d7061695p-5, 0x1.3b2b1bee74b2ap-7,
+ 0x1.5d88091198529p-10};
+ double lo_sq = lo * lo;
+ double c1 = fputil::multiply_add(lo, COEFFS[0], 1.0);
+ double c2 = fputil::multiply_add(lo, COEFFS[2], COEFFS[1]);
+ double c3 = fputil::multiply_add(lo, COEFFS[4], COEFFS[3]);
+ double exp2_lo = fputil::polyeval(lo_sq, c1, c2, c3);
+ // 2^x - 1 = 2^(hi + mid + lo) - 1
+ // = 2^(hi + mid) * 2^lo - 1
+ // ~ mh * (1 + lo * P(lo)) - 1
+ // = mh * exp2_lo - 1
+ return static_cast<float>(fputil::multiply_add(exp2_lo, mh, -1.0));
+}
+
+} // namespace math
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC___SUPPORT_MATH_EXP2M1F_H
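The hi/mid/lo range reduction in the header above is easier to follow with a concrete input. For x = 10.3: k = round(10.3 * 32) = 330, so hi = 330 >> 5 = 10, mid = (330 & 31) / 32 = 0.3125, and lo = 10.3 - 330/32 = -0.0125, which lies in [-2^-6, 2^-6]. The sketch below (standalone C++, illustration only; the libc code uses the EXP_2_MID table and fused multiply-adds rather than std::exp2) reassembles 2^x - 1 from those three pieces:

    // Hedged sketch of the range reduction; the names and the use of std::exp2
    // are illustrative, not the libc implementation.
    #include <cmath>
    #include <cstdio>

    int main() {
      float x = 10.3f;
      int k = static_cast<int>(std::lround(x * 32.0f)); // round(x * 2^5) = 330
      double hi = static_cast<double>(k >> 5);           // 10
      double mid = static_cast<double>(k & 31) / 32.0;   // 10/32 = 0.3125
      double lo = static_cast<double>(x) - k / 32.0;     // in [-2^-6, 2^-6]
      double result = std::exp2(hi) * std::exp2(mid) * std::exp2(lo) - 1.0;
      std::printf("hi=%g mid=%g lo=%g  2^x-1 ~ %g (reference %g)\n", hi, mid, lo,
                  result, std::exp2(static_cast<double>(x)) - 1.0);
      return 0;
    }
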
diff --git a/libc/src/math/generic/CMakeLists.txt b/libc/src/math/generic/CMakeLists.txt
index 0754b5e..6068c36 100644
--- a/libc/src/math/generic/CMakeLists.txt
+++ b/libc/src/math/generic/CMakeLists.txt
@@ -1488,17 +1488,7 @@ add_entrypoint_object(
HDRS
../exp2m1f.h
DEPENDS
- libc.src.errno.errno
- libc.src.__support.common
- libc.src.__support.FPUtil.except_value_utils
- libc.src.__support.FPUtil.fenv_impl
- libc.src.__support.FPUtil.fp_bits
- libc.src.__support.FPUtil.multiply_add
- libc.src.__support.FPUtil.polyeval
- libc.src.__support.FPUtil.rounding_mode
- libc.src.__support.macros.optimization
- libc.src.__support.macros.properties.cpu_features
- libc.src.__support.math.exp10f_utils
+ libc.src.__support.math.exp2m1f
)
add_entrypoint_object(
diff --git a/libc/src/math/generic/exp2m1f.cpp b/libc/src/math/generic/exp2m1f.cpp
index 16244ed..14d026f 100644
--- a/libc/src/math/generic/exp2m1f.cpp
+++ b/libc/src/math/generic/exp2m1f.cpp
@@ -7,183 +7,10 @@
//===----------------------------------------------------------------------===//
#include "src/math/exp2m1f.h"
-#include "src/__support/FPUtil/FEnvImpl.h"
-#include "src/__support/FPUtil/FPBits.h"
-#include "src/__support/FPUtil/PolyEval.h"
-#include "src/__support/FPUtil/except_value_utils.h"
-#include "src/__support/FPUtil/multiply_add.h"
-#include "src/__support/FPUtil/rounding_mode.h"
-#include "src/__support/common.h"
-#include "src/__support/libc_errno.h"
-#include "src/__support/macros/config.h"
-#include "src/__support/macros/optimization.h"
-#include "src/__support/macros/properties/cpu_features.h"
-#include "src/__support/math/exp10f_utils.h"
+#include "src/__support/math/exp2m1f.h"
namespace LIBC_NAMESPACE_DECL {
-#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS
-static constexpr size_t N_EXCEPTS_LO = 8;
-
-static constexpr fputil::ExceptValues<float, N_EXCEPTS_LO> EXP2M1F_EXCEPTS_LO =
- {{
- // (input, RZ output, RU offset, RD offset, RN offset)
- // x = 0x1.36dc8ep-36, exp2m1f(x) = 0x1.aef212p-37 (RZ)
- {0x2d9b'6e47U, 0x2d57'7909U, 1U, 0U, 0U},
- // x = 0x1.224936p-19, exp2m1f(x) = 0x1.926c0ep-20 (RZ)
- {0x3611'249bU, 0x35c9'3607U, 1U, 0U, 1U},
- // x = 0x1.d16d2p-20, exp2m1f(x) = 0x1.429becp-20 (RZ)
- {0x35e8'b690U, 0x35a1'4df6U, 1U, 0U, 1U},
- // x = 0x1.17949ep-14, exp2m1f(x) = 0x1.8397p-15 (RZ)
- {0x388b'ca4fU, 0x3841'cb80U, 1U, 0U, 1U},
- // x = -0x1.9c3e1ep-38, exp2m1f(x) = -0x1.1dbeacp-38 (RZ)
- {0xacce'1f0fU, 0xac8e'df56U, 0U, 1U, 0U},
- // x = -0x1.4d89b4p-32, exp2m1f(x) = -0x1.ce61b6p-33 (RZ)
- {0xafa6'c4daU, 0xaf67'30dbU, 0U, 1U, 1U},
- // x = -0x1.a6eac4p-10, exp2m1f(x) = -0x1.24fadap-10 (RZ)
- {0xbad3'7562U, 0xba92'7d6dU, 0U, 1U, 1U},
- // x = -0x1.e7526ep-6, exp2m1f(x) = -0x1.4e53dep-6 (RZ)
- {0xbcf3'a937U, 0xbca7'29efU, 0U, 1U, 1U},
- }};
-
-static constexpr size_t N_EXCEPTS_HI = 3;
-
-static constexpr fputil::ExceptValues<float, N_EXCEPTS_HI> EXP2M1F_EXCEPTS_HI =
- {{
- // (input, RZ output, RU offset, RD offset, RN offset)
- // x = 0x1.16a972p-1, exp2m1f(x) = 0x1.d545b2p-2 (RZ)
- {0x3f0b'54b9U, 0x3eea'a2d9U, 1U, 0U, 0U},
- // x = -0x1.9f12acp-5, exp2m1f(x) = -0x1.1ab68cp-5 (RZ)
- {0xbd4f'8956U, 0xbd0d'5b46U, 0U, 1U, 0U},
- // x = -0x1.de7b9cp-5, exp2m1f(x) = -0x1.4508f4p-5 (RZ)
- {0xbd6f'3dceU, 0xbd22'847aU, 0U, 1U, 1U},
- }};
-#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS
-
-LLVM_LIBC_FUNCTION(float, exp2m1f, (float x)) {
- using FPBits = fputil::FPBits<float>;
- FPBits xbits(x);
-
- uint32_t x_u = xbits.uintval();
- uint32_t x_abs = x_u & 0x7fff'ffffU;
-
- // When |x| >= 128, or x is nan, or |x| <= 2^-5
- if (LIBC_UNLIKELY(x_abs >= 0x4300'0000U || x_abs <= 0x3d00'0000U)) {
- // |x| <= 2^-5
- if (x_abs <= 0x3d00'0000U) {
-#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS
- if (auto r = EXP2M1F_EXCEPTS_LO.lookup(x_u); LIBC_UNLIKELY(r.has_value()))
- return r.value();
-#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS
-
- // Minimax polynomial generated by Sollya with:
- // > display = hexadecimal;
- // > fpminimax((2^x - 1)/x, 5, [|D...|], [-2^-5, 2^-5]);
- constexpr double COEFFS[] = {
- 0x1.62e42fefa39f3p-1, 0x1.ebfbdff82c57bp-3, 0x1.c6b08d6f2d7aap-5,
- 0x1.3b2ab6fc92f5dp-7, 0x1.5d897cfe27125p-10, 0x1.43090e61e6af1p-13};
- double xd = x;
- double xsq = xd * xd;
- double c0 = fputil::multiply_add(xd, COEFFS[1], COEFFS[0]);
- double c1 = fputil::multiply_add(xd, COEFFS[3], COEFFS[2]);
- double c2 = fputil::multiply_add(xd, COEFFS[5], COEFFS[4]);
- double p = fputil::polyeval(xsq, c0, c1, c2);
- return static_cast<float>(p * xd);
- }
-
- // x >= 128, or x is nan
- if (xbits.is_pos()) {
- if (xbits.is_finite()) {
- int rounding = fputil::quick_get_round();
- if (rounding == FE_DOWNWARD || rounding == FE_TOWARDZERO)
- return FPBits::max_normal().get_val();
-
- fputil::set_errno_if_required(ERANGE);
- fputil::raise_except_if_required(FE_OVERFLOW);
- }
-
- // x >= 128 and 2^x - 1 rounds to +inf, or x is +inf or nan
- return x + FPBits::inf().get_val();
- }
- }
-
- if (LIBC_UNLIKELY(x <= -25.0f)) {
- // 2^(-inf) - 1 = -1
- if (xbits.is_inf())
- return -1.0f;
- // 2^nan - 1 = nan
- if (xbits.is_nan())
- return x;
-
- int rounding = fputil::quick_get_round();
- if (rounding == FE_UPWARD || rounding == FE_TOWARDZERO)
- return -0x1.ffff'fep-1f; // -1.0f + 0x1.0p-24f
-
- fputil::set_errno_if_required(ERANGE);
- fputil::raise_except_if_required(FE_UNDERFLOW);
- return -1.0f;
- }
-
-#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS
- if (auto r = EXP2M1F_EXCEPTS_HI.lookup(x_u); LIBC_UNLIKELY(r.has_value()))
- return r.value();
-#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS
-
- // For -25 < x < 128, to compute 2^x, we perform the following range
- // reduction: find hi, mid, lo such that:
- // x = hi + mid + lo, in which:
- // hi is an integer,
- // 0 <= mid * 2^5 < 32 is an integer,
- // -2^(-6) <= lo <= 2^(-6).
- // In particular,
- // hi + mid = round(x * 2^5) * 2^(-5).
- // Then,
- // 2^x = 2^(hi + mid + lo) = 2^hi * 2^mid * 2^lo.
- // 2^mid is stored in the lookup table of 32 elements.
- // 2^lo is computed using a degree-4 minimax polynomial generated by Sollya.
- // We perform 2^hi * 2^mid by simply add hi to the exponent field of 2^mid.
-
- // kf = (hi + mid) * 2^5 = round(x * 2^5)
- float kf;
- int k;
-#ifdef LIBC_TARGET_CPU_HAS_NEAREST_INT
- kf = fputil::nearest_integer(x * 32.0f);
- k = static_cast<int>(kf);
-#else
- constexpr float HALF[2] = {0.5f, -0.5f};
- k = static_cast<int>(fputil::multiply_add(x, 32.0f, HALF[x < 0.0f]));
- kf = static_cast<float>(k);
-#endif // LIBC_TARGET_CPU_HAS_NEAREST_INT
-
- // lo = x - (hi + mid) = x - kf * 2^(-5)
- double lo = fputil::multiply_add(-0x1.0p-5f, kf, x);
-
- // hi = floor(kf * 2^(-4))
- // exp2_hi = shift hi to the exponent field of double precision.
- int64_t exp2_hi =
- static_cast<int64_t>(static_cast<uint64_t>(k >> ExpBase::MID_BITS)
- << fputil::FPBits<double>::FRACTION_LEN);
- // mh = 2^hi * 2^mid
- // mh_bits = bit field of mh
- int64_t mh_bits = ExpBase::EXP_2_MID[k & ExpBase::MID_MASK] + exp2_hi;
- double mh = fputil::FPBits<double>(static_cast<uint64_t>(mh_bits)).get_val();
-
- // Degree-4 polynomial approximating (2^x - 1)/x generated by Sollya with:
- // > display = hexadecimal;
- // > fpminimax((2^x - 1)/x, 4, [|D...|], [-2^-6, 2^-6]);
- constexpr double COEFFS[5] = {0x1.62e42fefa39efp-1, 0x1.ebfbdff8131c4p-3,
- 0x1.c6b08d7061695p-5, 0x1.3b2b1bee74b2ap-7,
- 0x1.5d88091198529p-10};
- double lo_sq = lo * lo;
- double c1 = fputil::multiply_add(lo, COEFFS[0], 1.0);
- double c2 = fputil::multiply_add(lo, COEFFS[2], COEFFS[1]);
- double c3 = fputil::multiply_add(lo, COEFFS[4], COEFFS[3]);
- double exp2_lo = fputil::polyeval(lo_sq, c1, c2, c3);
- // 2^x - 1 = 2^(hi + mid + lo) - 1
- // = 2^(hi + mid) * 2^lo - 1
- // ~ mh * (1 + lo * P(lo)) - 1
- // = mh * exp2_lo - 1
- return static_cast<float>(fputil::multiply_add(exp2_lo, mh, -1.0));
-}
+LLVM_LIBC_FUNCTION(float, exp2m1f, (float x)) { return math::exp2m1f(x); }
} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/test/include/netinet_in_test.cpp b/libc/test/include/netinet_in_test.cpp
index a6c47a7..714892f 100644
--- a/libc/test/include/netinet_in_test.cpp
+++ b/libc/test/include/netinet_in_test.cpp
@@ -17,3 +17,13 @@ TEST(LlvmLibcNetinetInTest, IPPROTOMacro) {
EXPECT_EQ(IPPROTO_IPV6, 41);
EXPECT_EQ(IPPROTO_RAW, 255);
}
+
+TEST(LlvmLibcNetinetInTest, IPV6Macro) {
+ EXPECT_EQ(IPV6_UNICAST_HOPS, 16);
+ EXPECT_EQ(IPV6_MULTICAST_IF, 17);
+ EXPECT_EQ(IPV6_MULTICAST_HOPS, 18);
+ EXPECT_EQ(IPV6_MULTICAST_LOOP, 19);
+ EXPECT_EQ(IPV6_JOIN_GROUP, 20);
+ EXPECT_EQ(IPV6_LEAVE_GROUP, 21);
+ EXPECT_EQ(IPV6_V6ONLY, 26);
+}
diff --git a/libc/test/shared/CMakeLists.txt b/libc/test/shared/CMakeLists.txt
index 8d81199..aede395 100644
--- a/libc/test/shared/CMakeLists.txt
+++ b/libc/test/shared/CMakeLists.txt
@@ -43,6 +43,7 @@ add_fp_unittest(
libc.src.__support.math.exp2
libc.src.__support.math.exp2f
libc.src.__support.math.exp2f16
+ libc.src.__support.math.exp2m1f
libc.src.__support.math.exp10
libc.src.__support.math.exp10f
libc.src.__support.math.exp10f16
diff --git a/libc/test/shared/shared_math_test.cpp b/libc/test/shared/shared_math_test.cpp
index 84787d5..a6825a1 100644
--- a/libc/test/shared/shared_math_test.cpp
+++ b/libc/test/shared/shared_math_test.cpp
@@ -61,6 +61,7 @@ TEST(LlvmLibcSharedMathTest, AllFloat) {
EXPECT_FP_EQ(0x0p+0f, LIBC_NAMESPACE::shared::exp10m1f(0.0f));
EXPECT_FP_EQ(0x0p+0f, LIBC_NAMESPACE::shared::erff(0.0f));
EXPECT_FP_EQ(0x1p+0f, LIBC_NAMESPACE::shared::exp10f(0.0f));
+ EXPECT_FP_EQ(0x0p+0f, LIBC_NAMESPACE::shared::exp2m1f(0.0f));
EXPECT_FP_EQ(0x1p+0f, LIBC_NAMESPACE::shared::expf(0.0f));
EXPECT_FP_EQ(0x1p+0f, LIBC_NAMESPACE::shared::exp2f(0.0f));
diff --git a/libc/test/src/arpa/inet/CMakeLists.txt b/libc/test/src/arpa/inet/CMakeLists.txt
index 6e78e3a..21760df 100644
--- a/libc/test/src/arpa/inet/CMakeLists.txt
+++ b/libc/test/src/arpa/inet/CMakeLists.txt
@@ -6,8 +6,6 @@ add_libc_unittest(
libc_arpa_inet_unittests
SRCS
htonl_test.cpp
- CXX_STANDARD
- 20
DEPENDS
libc.src.arpa.inet.htonl
libc.src.arpa.inet.ntohl
@@ -19,8 +17,6 @@ add_libc_unittest(
libc_arpa_inet_unittests
SRCS
htons_test.cpp
- CXX_STANDARD
- 20
DEPENDS
libc.src.arpa.inet.htons
libc.src.arpa.inet.ntohs
@@ -32,8 +28,6 @@ add_libc_unittest(
libc_arpa_inet_unittests
SRCS
ntohl_test.cpp
- CXX_STANDARD
- 20
DEPENDS
libc.src.arpa.inet.htonl
libc.src.arpa.inet.ntohl
@@ -45,8 +39,6 @@ add_libc_unittest(
libc_arpa_inet_unittests
SRCS
ntohs_test.cpp
- CXX_STANDARD
- 20
DEPENDS
libc.src.arpa.inet.htons
libc.src.arpa.inet.ntohs
diff --git a/libcxx/include/__configuration/abi.h b/libcxx/include/__configuration/abi.h
index 2d33b9c..c9936df 100644
--- a/libcxx/include/__configuration/abi.h
+++ b/libcxx/include/__configuration/abi.h
@@ -30,8 +30,20 @@
#elif _LIBCPP_ABI_FORCE_MICROSOFT
# define _LIBCPP_ABI_MICROSOFT
#else
+// Windows uses the Microsoft ABI
# if defined(_WIN32) && defined(_MSC_VER)
# define _LIBCPP_ABI_MICROSOFT
+
+// 32-bit ARM uses the Itanium ABI with a few differences (array cookies, etc),
+// and so does 64-bit ARM on Apple platforms.
+# elif defined(__arm__) || (defined(__APPLE__) && defined(__aarch64__))
+# define _LIBCPP_ABI_ITANIUM_WITH_ARM_DIFFERENCES
+
+// Non-Apple 64-bit ARM uses the vanilla Itanium ABI
+# elif defined(__aarch64__)
+# define _LIBCPP_ABI_ITANIUM
+
+// We assume that other architectures use the vanilla Itanium ABI too
# else
# define _LIBCPP_ABI_ITANIUM
# endif
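The branch order above matters: 32-bit ARM and Apple arm64 get _LIBCPP_ABI_ITANIUM_WITH_ARM_DIFFERENCES, while other AArch64 targets fall through to the vanilla _LIBCPP_ABI_ITANIUM. A hedged sketch of a translation unit that would fail to build if the selection did not match the target (assumes libc++'s internal <__config> is reachable and that none of the _LIBCPP_ABI_FORCE_* overrides are in effect; not part of the patch):

    // Sketch only: mirrors the #elif chain above to sanity-check the selected macro.
    #include <__config>

    #if defined(__arm__) || (defined(__APPLE__) && defined(__aarch64__))
    #  if !defined(_LIBCPP_ABI_ITANIUM_WITH_ARM_DIFFERENCES)
    #    error "expected the Itanium ABI with ARM differences on this target"
    #  endif
    #elif defined(__aarch64__)
    #  if !defined(_LIBCPP_ABI_ITANIUM)
    #    error "expected the vanilla Itanium ABI on non-Apple AArch64"
    #  endif
    #endif

    int main() { return 0; }
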
diff --git a/libcxx/include/__cxx03/vector b/libcxx/include/__cxx03/vector
index dbaa33c..43e82cd 100644
--- a/libcxx/include/__cxx03/vector
+++ b/libcxx/include/__cxx03/vector
@@ -1630,7 +1630,7 @@ private:
return __n * __bits_per_word;
}
_LIBCPP_HIDE_FROM_ABI static size_type __external_cap_to_internal(size_type __n) _NOEXCEPT {
- return (__n - 1) / __bits_per_word + 1;
+ return __n > 0 ? (__n - 1) / __bits_per_word + 1 : size_type(0);
}
public:
@@ -2142,11 +2142,13 @@ void vector<bool, _Allocator>::reserve(size_type __n) {
template <class _Allocator>
void vector<bool, _Allocator>::shrink_to_fit() _NOEXCEPT {
- if (__external_cap_to_internal(size()) > __cap()) {
+ if (__external_cap_to_internal(size()) < __cap()) {
#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
try {
#endif // _LIBCPP_HAS_NO_EXCEPTIONS
- vector(*this, allocator_type(__alloc())).swap(*this);
+ vector __v(*this, allocator_type(__alloc()));
+ if (__v.__cap() < __cap())
+ __v.swap(*this);
#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
} catch (...) {
}
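The two vector<bool> changes work together. __external_cap_to_internal(0) previously computed (0 - 1) / __bits_per_word + 1 on an unsigned size_type, i.e. it wrapped around instead of returning 0; with __bits_per_word == 64 the corrected mapping is 0 -> 0, 1 -> 1, 64 -> 1, 65 -> 2. shrink_to_fit also now refuses to install the temporary unless it actually has a smaller word capacity, so the call can no longer grow the vector. A hedged sketch of the resulting guarantee, using only the public API:

    // Sketch of the observable behavior after the fix: shrink_to_fit() never
    // increases capacity, and an empty vector<bool> is handled without wrap-around.
    #include <cassert>
    #include <cstddef>
    #include <vector>

    int main() {
      std::vector<bool> v(1024, true);
      v.resize(10);                        // storage is still sized for 1024 bits
      const std::size_t cap_before = v.capacity();
      v.shrink_to_fit();                   // may reallocate, but must not grow
      assert(v.capacity() <= cap_before);

      std::vector<bool> empty;
      empty.shrink_to_fit();               // size() == 0: no underflow in the cap math
      assert(empty.capacity() >= empty.size());
      return 0;
    }
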
diff --git a/libcxx/include/__memory/array_cookie.h b/libcxx/include/__memory/array_cookie.h
index 806a9e9..be59f36 100644
--- a/libcxx/include/__memory/array_cookie.h
+++ b/libcxx/include/__memory/array_cookie.h
@@ -13,6 +13,7 @@
#include <__config>
#include <__configuration/abi.h>
#include <__cstddef/size_t.h>
+#include <__memory/addressof.h>
#include <__type_traits/integral_constant.h>
#include <__type_traits/is_trivially_destructible.h>
#include <__type_traits/negation.h>
@@ -26,14 +27,15 @@ _LIBCPP_BEGIN_NAMESPACE_STD
// Trait representing whether a type requires an array cookie at the start of its allocation when
// allocated as `new T[n]` and deallocated as `delete[] array`.
//
-// Under the Itanium C++ ABI [1], we know that an array cookie is available unless `T` is trivially
-// destructible and the call to `operator delete[]` is not a sized operator delete. Under ABIs other
-// than the Itanium ABI, we assume there are no array cookies.
+// Under the Itanium C++ ABI [1] and the ARM ABI, which derives from it, we know that an array cookie is available
+// unless `T` is trivially destructible and the call to `operator delete[]` is not a sized operator delete. Under
+// other ABIs, we assume there are no array cookies.
//
// [1]: https://itanium-cxx-abi.github.io/cxx-abi/abi.html#array-cookies
-#ifdef _LIBCPP_ABI_ITANIUM
+#if defined(_LIBCPP_ABI_ITANIUM) || defined(_LIBCPP_ABI_ITANIUM_WITH_ARM_DIFFERENCES)
// TODO: Use a builtin instead
-// TODO: We should factor in the choice of the usual deallocation function in this determination.
+// TODO: We should factor in the choice of the usual deallocation function in this determination:
+// a cookie may be available in more cases but we ignore those for now.
template <class _Tp>
struct __has_array_cookie : _Not<is_trivially_destructible<_Tp> > {};
#else
@@ -41,13 +43,79 @@ template <class _Tp>
struct __has_array_cookie : false_type {};
#endif
+struct __itanium_array_cookie {
+ size_t __element_count;
+};
+
+template <class _Tp>
+struct [[__gnu__::__aligned__(_LIBCPP_ALIGNOF(_Tp))]] __arm_array_cookie {
+ size_t __element_size;
+ size_t __element_count;
+};
+
+// Return the element count in the array cookie located before the given pointer.
+//
+// In the Itanium ABI [1]
+// ----------------------
+// The element count is stored immediately before the first element of the array. If the preferred alignment
+// of array elements (which is different from the ABI alignment) is more than that of size_t, additional
+// padding bytes exist before the array cookie. Assuming array elements of size and alignment 16 bytes, that
+// gives us the following layout:
+//
+// |ooooooooxxxxxxxxaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbccccccccccccccccdddddddddddddddd|
+// ^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// | ^^^^^^^^ |
+// | | array elements
+// padding |
+// element count
+//
+//
+// In the Itanium ABI with ARM differences [2]
+// -------------------------------------------
+// The array cookie is stored at the very start of the allocation and it has the following form:
+//
+// struct array_cookie {
+// std::size_t element_size; // element_size != 0
+// std::size_t element_count;
+// };
+//
+// Assuming elements of size and alignment 32 bytes, this gives us the following layout:
+//
+// |xxxxxxxxXXXXXXXXooooooooooooooooaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb|
+// ^^^^^^^^ ^^^^^^^^^^^^^^^^
+// | ^^^^^^^^ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// element size | padding |
+// element count array elements
+//
+// We must be careful to take into account the alignment of the array cookie, which may result in padding
+// bytes between the element count and the first element of the array. Note that for ARM, the compiler
+// aligns the array cookie using the ABI alignment, not the preferred alignment of array elements.
+//
+// [1]: https://itanium-cxx-abi.github.io/cxx-abi/abi.html#array-cookies
+// [2]: https://developer.apple.com/documentation/xcode/writing-arm64-code-for-apple-platforms#Handle-C++-differences
template <class _Tp>
// Avoid failures when -fsanitize-address-poison-custom-array-cookie is enabled
-_LIBCPP_HIDE_FROM_ABI _LIBCPP_NO_SANITIZE("address") size_t __get_array_cookie(_Tp const* __ptr) {
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_NO_SANITIZE("address") size_t __get_array_cookie([[__maybe_unused__]] _Tp const* __ptr) {
static_assert(
__has_array_cookie<_Tp>::value, "Trying to access the array cookie of a type that is not guaranteed to have one");
- size_t const* __cookie = reinterpret_cast<size_t const*>(__ptr) - 1; // TODO: Use a builtin instead
- return *__cookie;
+
+#if defined(_LIBCPP_ABI_ITANIUM)
+ using _ArrayCookie = __itanium_array_cookie;
+#elif defined(_LIBCPP_ABI_ITANIUM_WITH_ARM_DIFFERENCES)
+ using _ArrayCookie = __arm_array_cookie<_Tp>;
+#else
+ static_assert(false, "The array cookie layout is unknown on this ABI");
+ struct _ArrayCookie { // dummy definition required to make the function parse
+ size_t element_count;
+ };
+#endif
+
+ char const* __array_cookie_start = reinterpret_cast<char const*>(__ptr) - sizeof(_ArrayCookie);
+ _ArrayCookie __cookie;
+ // This is necessary to avoid violating strict aliasing. It's valid because _ArrayCookie is an
+ // implicit lifetime type.
+ __builtin_memcpy(std::addressof(__cookie), __array_cookie_start, sizeof(_ArrayCookie));
+ return __cookie.__element_count;
}
_LIBCPP_END_NAMESPACE_STD
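The memcpy-based read above mirrors what a standalone program can do by hand under the plain Itanium ABI, where new T[n] for a type with a non-trivial destructor stores n in a size_t immediately before the first element. A hedged sketch (Itanium-ABI targets only; read_itanium_cookie is an illustrative helper, not a libc++ API):

    // Sketch for Itanium-ABI targets: reads the element count the compiler stored
    // in the array cookie, using memcpy to stay within strict aliasing.
    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    struct NonTrivial {
      ~NonTrivial() {}   // non-trivial destructor => an array cookie is emitted
    };

    static std::size_t read_itanium_cookie(const NonTrivial *first) {
      std::size_t count;
      std::memcpy(&count,
                  reinterpret_cast<const char *>(first) - sizeof(std::size_t),
                  sizeof(count));
      return count;
    }

    int main() {
      NonTrivial *arr = new NonTrivial[7];
      std::printf("cookie element count: %zu\n", read_itanium_cookie(arr)); // 7
      delete[] arr;
      return 0;
    }
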
diff --git a/libcxx/include/__tree b/libcxx/include/__tree
index d7d074a0..0738c8c 100644
--- a/libcxx/include/__tree
+++ b/libcxx/include/__tree
@@ -1119,15 +1119,15 @@ public:
_LIBCPP_HIDE_FROM_ABI _InsertReturnType __node_handle_insert_unique(_NodeHandle&&);
template <class _NodeHandle>
_LIBCPP_HIDE_FROM_ABI iterator __node_handle_insert_unique(const_iterator, _NodeHandle&&);
- template <class _Tree>
- _LIBCPP_HIDE_FROM_ABI void __node_handle_merge_unique(_Tree& __source);
+ template <class _Comp2>
+ _LIBCPP_HIDE_FROM_ABI void __node_handle_merge_unique(__tree<_Tp, _Comp2, _Allocator>& __source);
template <class _NodeHandle>
_LIBCPP_HIDE_FROM_ABI iterator __node_handle_insert_multi(_NodeHandle&&);
template <class _NodeHandle>
_LIBCPP_HIDE_FROM_ABI iterator __node_handle_insert_multi(const_iterator, _NodeHandle&&);
- template <class _Tree>
- _LIBCPP_HIDE_FROM_ABI void __node_handle_merge_multi(_Tree& __source);
+ template <class _Comp2>
+ _LIBCPP_HIDE_FROM_ABI void __node_handle_merge_multi(__tree<_Tp, _Comp2, _Allocator>& __source);
template <class _NodeHandle>
_LIBCPP_HIDE_FROM_ABI _NodeHandle __node_handle_extract(key_type const&);
@@ -2020,11 +2020,10 @@ _LIBCPP_HIDE_FROM_ABI _NodeHandle __tree<_Tp, _Compare, _Allocator>::__node_hand
}
template <class _Tp, class _Compare, class _Allocator>
-template <class _Tree>
-_LIBCPP_HIDE_FROM_ABI void __tree<_Tp, _Compare, _Allocator>::__node_handle_merge_unique(_Tree& __source) {
- static_assert(is_same<typename _Tree::__node_pointer, __node_pointer>::value, "");
-
- for (typename _Tree::iterator __i = __source.begin(); __i != __source.end();) {
+template <class _Comp2>
+_LIBCPP_HIDE_FROM_ABI void
+__tree<_Tp, _Compare, _Allocator>::__node_handle_merge_unique(__tree<_Tp, _Comp2, _Allocator>& __source) {
+ for (iterator __i = __source.begin(); __i != __source.end();) {
__node_pointer __src_ptr = __i.__get_np();
auto [__parent, __child] = __find_equal(__src_ptr->__get_value());
++__i;
@@ -2065,11 +2064,10 @@ __tree<_Tp, _Compare, _Allocator>::__node_handle_insert_multi(const_iterator __h
}
template <class _Tp, class _Compare, class _Allocator>
-template <class _Tree>
-_LIBCPP_HIDE_FROM_ABI void __tree<_Tp, _Compare, _Allocator>::__node_handle_merge_multi(_Tree& __source) {
- static_assert(is_same<typename _Tree::__node_pointer, __node_pointer>::value, "");
-
- for (typename _Tree::iterator __i = __source.begin(); __i != __source.end();) {
+template <class _Comp2>
+_LIBCPP_HIDE_FROM_ABI void
+__tree<_Tp, _Compare, _Allocator>::__node_handle_merge_multi(__tree<_Tp, _Comp2, _Allocator>& __source) {
+ for (iterator __i = __source.begin(); __i != __source.end();) {
__node_pointer __src_ptr = __i.__get_np();
__end_node_pointer __parent;
__node_base_pointer& __child = __find_leaf_high(__parent, __src_ptr->__get_value());
diff --git a/libcxx/test/std/containers/sequences/vector.bool/shrink_to_fit.pass.cpp b/libcxx/test/std/containers/sequences/vector.bool/shrink_to_fit.pass.cpp
index 665867a..bf17733 100644
--- a/libcxx/test/std/containers/sequences/vector.bool/shrink_to_fit.pass.cpp
+++ b/libcxx/test/std/containers/sequences/vector.bool/shrink_to_fit.pass.cpp
@@ -11,8 +11,6 @@
// void shrink_to_fit();
-// XFAIL: FROZEN-CXX03-HEADERS-FIXME
-
#include <cassert>
#include <climits>
#include <vector>
diff --git a/libcxx/test/std/utilities/smartptr/unique.ptr/unique.ptr.class/unique.ptr.observers/assert.subscript.pass.cpp b/libcxx/test/std/utilities/smartptr/unique.ptr/unique.ptr.class/unique.ptr.observers/assert.subscript.pass.cpp
index b7cc123..f7390ef 100644
--- a/libcxx/test/std/utilities/smartptr/unique.ptr/unique.ptr.class/unique.ptr.observers/assert.subscript.pass.cpp
+++ b/libcxx/test/std/utilities/smartptr/unique.ptr/unique.ptr.class/unique.ptr.observers/assert.subscript.pass.cpp
@@ -58,15 +58,18 @@ void test() {
{
{
std::unique_ptr<WithCookie[]> ptr(new WithCookie[5]);
+ assert(&ptr[1] == ptr.get() + 1); // ensure no assertion
TEST_LIBCPP_ASSERT_FAILURE(ptr[6], "unique_ptr<T[]>::operator[](index): index out of range");
}
{
std::unique_ptr<WithCookie[]> ptr = std::make_unique<WithCookie[]>(5);
+ assert(&ptr[1] == ptr.get() + 1); // ensure no assertion
TEST_LIBCPP_ASSERT_FAILURE(ptr[6], "unique_ptr<T[]>::operator[](index): index out of range");
}
#if TEST_STD_VER >= 20
{
std::unique_ptr<WithCookie[]> ptr = std::make_unique_for_overwrite<WithCookie[]>(5);
+ assert(&ptr[1] == ptr.get() + 1); // ensure no assertion
TEST_LIBCPP_ASSERT_FAILURE(ptr[6] = WithCookie(), "unique_ptr<T[]>::operator[](index): index out of range");
}
#endif
@@ -82,11 +85,13 @@ void test() {
{
{
std::unique_ptr<NoCookie[]> ptr = std::make_unique<NoCookie[]>(5);
+ assert(&ptr[1] == ptr.get() + 1); // ensure no assertion
TEST_LIBCPP_ASSERT_FAILURE(ptr[6], "unique_ptr<T[]>::operator[](index): index out of range");
}
# if TEST_STD_VER >= 20
{
std::unique_ptr<NoCookie[]> ptr = std::make_unique_for_overwrite<NoCookie[]>(5);
+ assert(&ptr[1] == ptr.get() + 1); // ensure no assertion
TEST_LIBCPP_ASSERT_FAILURE(ptr[6] = NoCookie(), "unique_ptr<T[]>::operator[](index): index out of range");
}
# endif
@@ -101,6 +106,7 @@ void test() {
{
std::unique_ptr<T[]> ptr = std::make_unique<T[]>(5);
std::unique_ptr<T[]> other(std::move(ptr));
+ assert(&other[1] == other.get() + 1); // ensure no assertion
TEST_LIBCPP_ASSERT_FAILURE(other[6], "unique_ptr<T[]>::operator[](index): index out of range");
}
@@ -109,6 +115,7 @@ void test() {
std::unique_ptr<T[]> ptr = std::make_unique<T[]>(5);
std::unique_ptr<T[]> other;
other = std::move(ptr);
+ assert(&other[1] == other.get() + 1); // ensure no assertion
TEST_LIBCPP_ASSERT_FAILURE(other[6], "unique_ptr<T[]>::operator[](index): index out of range");
}
@@ -116,6 +123,7 @@ void test() {
{
std::unique_ptr<T[]> ptr = std::make_unique<T[]>(5);
std::unique_ptr<T[], MyDeleter> other(std::move(ptr));
+ assert(&other[1] == other.get() + 1); // ensure no assertion
TEST_LIBCPP_ASSERT_FAILURE(other[6], "unique_ptr<T[]>::operator[](index): index out of range");
}
@@ -124,6 +132,7 @@ void test() {
std::unique_ptr<T[]> ptr = std::make_unique<T[]>(5);
std::unique_ptr<T[], MyDeleter> other;
other = std::move(ptr);
+ assert(&other[1] == other.get() + 1); // ensure no assertion
TEST_LIBCPP_ASSERT_FAILURE(other[6], "unique_ptr<T[]>::operator[](index): index out of range");
}
});
@@ -144,6 +153,34 @@ struct WithCookie {
char padding[Size];
};
+template <std::size_t Size>
+struct alignas(128) OveralignedNoCookie {
+ char padding[Size];
+};
+
+template <std::size_t Size>
+struct alignas(128) OveralignedWithCookie {
+ OveralignedWithCookie() = default;
+ OveralignedWithCookie(OveralignedWithCookie const&) {}
+ OveralignedWithCookie& operator=(OveralignedWithCookie const&) { return *this; }
+ ~OveralignedWithCookie() {}
+ char padding[Size];
+};
+
+// These types have a different ABI alignment (alignof) and preferred alignment (__alignof) on some platforms.
+// Make sure things work with these types because array cookies can be sensitive to preferred alignment on some
+// platforms.
+struct WithCookiePreferredAlignment {
+ WithCookiePreferredAlignment() = default;
+ WithCookiePreferredAlignment(WithCookiePreferredAlignment const&) {}
+ WithCookiePreferredAlignment& operator=(WithCookiePreferredAlignment const&) { return *this; }
+ ~WithCookiePreferredAlignment() {}
+ long double data;
+};
+struct NoCookiePreferredAlignment {
+ long double data;
+};
+
int main(int, char**) {
test<WithCookie<1>, NoCookie<1>>();
test<WithCookie<2>, NoCookie<2>>();
@@ -153,7 +190,18 @@ int main(int, char**) {
test<WithCookie<16>, NoCookie<16>>();
test<WithCookie<32>, NoCookie<32>>();
test<WithCookie<256>, NoCookie<256>>();
+
+ test<OveralignedWithCookie<1>, OveralignedNoCookie<1>>();
+ test<OveralignedWithCookie<2>, OveralignedNoCookie<2>>();
+ test<OveralignedWithCookie<3>, OveralignedNoCookie<3>>();
+ test<OveralignedWithCookie<4>, OveralignedNoCookie<4>>();
+ test<OveralignedWithCookie<8>, OveralignedNoCookie<8>>();
+ test<OveralignedWithCookie<16>, OveralignedNoCookie<16>>();
+ test<OveralignedWithCookie<32>, OveralignedNoCookie<32>>();
+ test<OveralignedWithCookie<256>, OveralignedNoCookie<256>>();
+
test<std::string, int>();
+ test<WithCookiePreferredAlignment, NoCookiePreferredAlignment>();
return 0;
}
diff --git a/libcxx/utils/compare-benchmarks b/libcxx/utils/compare-benchmarks
index 988e243..d165c73 100755
--- a/libcxx/utils/compare-benchmarks
+++ b/libcxx/utils/compare-benchmarks
@@ -65,9 +65,16 @@ def plain_text_comparison(data, metric, baseline_name=None, candidate_name=None)
"""
data = data.replace(numpy.nan, None) # avoid NaNs in tabulate output
headers = ['Benchmark', baseline_name, candidate_name, 'Difference', '% Difference']
- fmt = (None, '.2f', '.2f', '.2f', '.2f')
- table = data[['benchmark', f'{metric}_0', f'{metric}_1', 'difference', 'percent']].set_index('benchmark')
- return tabulate.tabulate(table, headers=headers, floatfmt=fmt, numalign='right')
+ fmt = (None, '.2f', '.2f', '.2f', '.2%')
+ table = data[['benchmark', f'{metric}_0', f'{metric}_1', 'difference', 'percent']]
+
+ # Compute the geomean and report on their difference
+ geomean_0 = statistics.geometric_mean(data[f'{metric}_0'].dropna())
+ geomean_1 = statistics.geometric_mean(data[f'{metric}_1'].dropna())
+ geomean_row = ['Geomean', geomean_0, geomean_1, (geomean_1 - geomean_0), (geomean_1 - geomean_0) / geomean_0]
+    table.loc[table.index.max() + 1] = geomean_row
+
+ return tabulate.tabulate(table.set_index('benchmark'), headers=headers, floatfmt=fmt, numalign='right')
def create_chart(data, metric, subtitle=None, series_names=None):
"""
@@ -154,7 +161,7 @@ def main(argv):
# If we have exactly two data sets, compute additional info in new columns
if len(lnt_inputs) == 2:
data['difference'] = data[f'{args.metric}_1'] - data[f'{args.metric}_0']
- data['percent'] = 100 * (data['difference'] / data[f'{args.metric}_0'])
+ data['percent'] = data['difference'] / data[f'{args.metric}_0']
if args.filter is not None:
keeplist = [b for b in data['benchmark'] if re.search(args.filter, b) is not None]
diff --git a/lld/MachO/Driver.cpp b/lld/MachO/Driver.cpp
index bfd35ae..9b67db9 100644
--- a/lld/MachO/Driver.cpp
+++ b/lld/MachO/Driver.cpp
@@ -291,6 +291,7 @@ struct DeferredFile {
};
using DeferredFiles = std::vector<DeferredFile>;
+#if LLVM_ENABLE_THREADS
class SerialBackgroundQueue {
std::deque<std::function<void()>> queue;
std::thread *running;
@@ -359,7 +360,6 @@ void multiThreadedPageInBackground(DeferredFiles &deferred) {
(void)t;
}
};
-#if LLVM_ENABLE_THREADS
{ // Create scope for waiting for the taskGroup
std::atomic_size_t index = 0;
llvm::parallel::TaskGroup taskGroup;
@@ -373,7 +373,6 @@ void multiThreadedPageInBackground(DeferredFiles &deferred) {
}
});
}
-#endif
#ifndef NDEBUG
auto dt = high_resolution_clock::now() - t0;
if (Process::GetEnv("LLD_MULTI_THREAD_PAGE"))
@@ -390,6 +389,7 @@ static void multiThreadedPageIn(const DeferredFiles &deferred) {
multiThreadedPageInBackground(files);
});
}
+#endif
static InputFile *processFile(std::optional<MemoryBufferRef> buffer,
DeferredFiles *archiveContents, StringRef path,
@@ -1430,6 +1430,7 @@ static void createFiles(const InputArgList &args) {
}
}
+#if LLVM_ENABLE_THREADS
if (config->readWorkers) {
multiThreadedPageIn(deferredFiles);
@@ -1447,6 +1448,7 @@ static void createFiles(const InputArgList &args) {
for (auto *archive : archives)
archive->addLazySymbols();
}
+#endif
}
static void gatherInputSections() {
@@ -1834,6 +1836,7 @@ bool link(ArrayRef<const char *> argsArr, llvm::raw_ostream &stdoutOS,
}
if (auto *arg = args.getLastArg(OPT_read_workers)) {
+#if LLVM_ENABLE_THREADS
StringRef v(arg->getValue());
unsigned workers = 0;
if (!llvm::to_integer(v, workers, 0))
@@ -1841,6 +1844,10 @@ bool link(ArrayRef<const char *> argsArr, llvm::raw_ostream &stdoutOS,
": expected a non-negative integer, but got '" + arg->getValue() +
"'");
config->readWorkers = workers;
+#else
+ error(arg->getSpelling() +
+ ": option unavailable because lld was not built with thread support");
+#endif
}
if (auto *arg = args.getLastArg(OPT_threads_eq)) {
StringRef v(arg->getValue());
diff --git a/lld/test/CMakeLists.txt b/lld/test/CMakeLists.txt
index abc8ea7..1bd3ad7 100644
--- a/lld/test/CMakeLists.txt
+++ b/lld/test/CMakeLists.txt
@@ -1,5 +1,6 @@
llvm_canonicalize_cmake_booleans(
ENABLE_BACKTRACES
+ LLVM_ENABLE_THREADS
LLVM_ENABLE_ZLIB
LLVM_ENABLE_ZSTD
LLVM_ENABLE_LIBXML2
diff --git a/lld/test/MachO/read-workers-no-thread-support.s b/lld/test/MachO/read-workers-no-thread-support.s
new file mode 100644
index 0000000..16256be
--- /dev/null
+++ b/lld/test/MachO/read-workers-no-thread-support.s
@@ -0,0 +1,10 @@
+# REQUIRES: x86 && !thread_support
+# RUN: llvm-mc -filetype=obj -triple=x86_64-apple-darwin %s -o %t.o
+
+# RUN: not %lld --read-workers=1 %t.o -o /dev/null 2>&1 | FileCheck %s
+
+# CHECK: error: --read-workers=: option unavailable because lld was not built with thread support
+
+.globl _main
+_main:
+ ret
diff --git a/lld/test/MachO/read-workers.s b/lld/test/MachO/read-workers.s
index 6f0ea4d..294106b 100644
--- a/lld/test/MachO/read-workers.s
+++ b/lld/test/MachO/read-workers.s
@@ -1,4 +1,7 @@
-# REQUIRES: x86
+# REQUIRES: x86 && thread_support
+## Sometimes fails, particularly in an ASAN build; do not run until
+## https://github.com/llvm/llvm-project/pull/157917 addresses the cause.
+# UNSUPPORTED: target={{.*}}
# RUN: llvm-mc -filetype=obj -triple=x86_64-apple-darwin %s -o %t.o
## A non-negative integer is allowed.
diff --git a/lld/test/lit.cfg.py b/lld/test/lit.cfg.py
index 3369457..39c3d0a 100644
--- a/lld/test/lit.cfg.py
+++ b/lld/test/lit.cfg.py
@@ -182,3 +182,6 @@ if tar_executable:
# ELF tests expect the default target for ld.lld to be ELF.
if config.ld_lld_default_mingw:
config.excludes.append("ELF")
+
+if config.enable_threads:
+ config.available_features.add("thread_support")
diff --git a/lld/test/lit.site.cfg.py.in b/lld/test/lit.site.cfg.py.in
index bb99976..703d3b1 100644
--- a/lld/test/lit.site.cfg.py.in
+++ b/lld/test/lit.site.cfg.py.in
@@ -26,6 +26,7 @@ config.ld_lld_default_mingw = @LLD_DEFAULT_LD_LLD_IS_MINGW@
config.build_examples = @LLVM_BUILD_EXAMPLES@
config.has_plugins = @LLVM_ENABLE_PLUGINS@
config.linked_bye_extension = @LLVM_BYE_LINK_INTO_TOOLS@
+config.enable_threads = @LLVM_ENABLE_THREADS@
import lit.llvm
lit.llvm.initialize(lit_config, config)
diff --git a/lldb/include/lldb/Utility/DataExtractor.h b/lldb/include/lldb/Utility/DataExtractor.h
index 0b7e771..b4960f5 100644
--- a/lldb/include/lldb/Utility/DataExtractor.h
+++ b/lldb/include/lldb/Utility/DataExtractor.h
@@ -994,7 +994,7 @@ protected:
constexpr size_t src_size = sizeof(T);
T val = fail_value;
- const T *src = static_cast<const T *>(GetData(offset_ptr, src_size));
+ const void *src = GetData(offset_ptr, src_size);
if (!src)
return val;
diff --git a/lldb/source/Plugins/LanguageRuntime/ObjC/ObjCLanguageRuntime.cpp b/lldb/source/Plugins/LanguageRuntime/ObjC/ObjCLanguageRuntime.cpp
index c33760e..2b2ca08 100644
--- a/lldb/source/Plugins/LanguageRuntime/ObjC/ObjCLanguageRuntime.cpp
+++ b/lldb/source/Plugins/LanguageRuntime/ObjC/ObjCLanguageRuntime.cpp
@@ -423,6 +423,46 @@ Status ObjCLanguageRuntime::ObjCExceptionPrecondition::ConfigurePrecondition(
return error;
}
+CompilerType ObjCLanguageRuntime::LookupInModulesVendor(ConstString class_name,
+ Target &target) {
+ assert(class_name);
+
+  auto *persistent_state = llvm::cast_or_null<ClangPersistentVariables>(
+ target.GetPersistentExpressionStateForLanguage(lldb::eLanguageTypeC));
+ if (!persistent_state)
+ return {};
+
+ auto clang_modules_decl_vendor_sp =
+ persistent_state->GetClangModulesDeclVendor();
+ if (!clang_modules_decl_vendor_sp)
+ return {};
+
+ auto types = clang_modules_decl_vendor_sp->FindTypes(
+ class_name, /*max_matches*/ UINT32_MAX);
+ if (types.empty())
+ return {};
+
+ return types.front();
+}
+
+CompilerType ObjCLanguageRuntime::LookupInRuntime(ConstString class_name) {
+ auto *runtime_vendor = GetDeclVendor();
+ if (!runtime_vendor)
+ return {};
+
+ std::vector<CompilerDecl> compiler_decls;
+ runtime_vendor->FindDecls(class_name, false, UINT32_MAX, compiler_decls);
+ if (compiler_decls.empty())
+ return {};
+
+ auto *ctx =
+ llvm::dyn_cast<TypeSystemClang>(compiler_decls[0].GetTypeSystem());
+ if (!ctx)
+ return {};
+
+ return ctx->GetTypeForDecl(compiler_decls[0].GetOpaqueDecl());
+}
+
std::optional<CompilerType>
ObjCLanguageRuntime::GetRuntimeType(CompilerType base_type) {
CompilerType class_type;
@@ -442,18 +482,21 @@ ObjCLanguageRuntime::GetRuntimeType(CompilerType base_type) {
if (!class_name)
return std::nullopt;
- TypeSP complete_objc_class_type_sp = LookupInCompleteClassCache(class_name);
- if (!complete_objc_class_type_sp)
- return std::nullopt;
-
- CompilerType complete_class(
- complete_objc_class_type_sp->GetFullCompilerType());
- if (complete_class.GetCompleteType()) {
- if (is_pointer_type)
- return complete_class.GetPointerType();
- else
- return complete_class;
+ if (TypeSP complete_objc_class_type_sp =
+ LookupInCompleteClassCache(class_name)) {
+ if (CompilerType complete_class =
+ complete_objc_class_type_sp->GetFullCompilerType();
+ complete_class.GetCompleteType())
+ return is_pointer_type ? complete_class.GetPointerType() : complete_class;
}
+ assert(m_process);
+ if (CompilerType found =
+ LookupInModulesVendor(class_name, m_process->GetTarget()))
+ return is_pointer_type ? found.GetPointerType() : found;
+
+ if (CompilerType found = LookupInRuntime(class_name))
+ return is_pointer_type ? found.GetPointerType() : found;
+
return std::nullopt;
}
diff --git a/lldb/source/Plugins/LanguageRuntime/ObjC/ObjCLanguageRuntime.h b/lldb/source/Plugins/LanguageRuntime/ObjC/ObjCLanguageRuntime.h
index 45de098..cc8281e 100644
--- a/lldb/source/Plugins/LanguageRuntime/ObjC/ObjCLanguageRuntime.h
+++ b/lldb/source/Plugins/LanguageRuntime/ObjC/ObjCLanguageRuntime.h
@@ -465,6 +465,10 @@ protected:
ObjCLanguageRuntime(const ObjCLanguageRuntime &) = delete;
const ObjCLanguageRuntime &operator=(const ObjCLanguageRuntime &) = delete;
+
+private:
+ CompilerType LookupInRuntime(ConstString class_name);
+  CompilerType LookupInModulesVendor(ConstString class_name, Target &target);
};
} // namespace lldb_private
diff --git a/lldb/test/API/lang/objc/ivar-in-framework-base/Makefile b/lldb/test/API/lang/objc/ivar-in-framework-base/Makefile
new file mode 100644
index 0000000..c7947fc
--- /dev/null
+++ b/lldb/test/API/lang/objc/ivar-in-framework-base/Makefile
@@ -0,0 +1,6 @@
+OBJC_SOURCES := main.m lib.m
+LD_EXTRAS = -framework Foundation
+
+include Makefile.rules
+
+lib.o: CFLAGS = $(CFLAGS_NO_DEBUG)
diff --git a/lldb/test/API/lang/objc/ivar-in-framework-base/TestIvarInFrameworkBase.py b/lldb/test/API/lang/objc/ivar-in-framework-base/TestIvarInFrameworkBase.py
new file mode 100644
index 0000000..40fc6b7
--- /dev/null
+++ b/lldb/test/API/lang/objc/ivar-in-framework-base/TestIvarInFrameworkBase.py
@@ -0,0 +1,39 @@
+import lldb
+from lldbsuite.test.decorators import *
+from lldbsuite.test.lldbtest import *
+from lldbsuite.test import lldbutil
+
+
+class TestIvarInFrameworkBase(TestBase):
+ """
+ Tests whether LLDB's data inspection commands can correctly retrieve
+ information about ivars from the Objective-C runtime.
+ In this test-case we have a base class type for which we don't have access
+ to the debug-info of the implementation (mimicking the scenario of subclassing
+ a type from a system framework). LLDB won't be able to see the backing ivar for
+ 'fooProp' from just debug-info, but it will fall back on the runtime to get the
+ necessary information.
+ """
+
+ def test_frame_var(self):
+ self.build()
+ lldbutil.run_to_source_breakpoint(self, "break here", lldb.SBFileSpec("main.m"))
+ self.expect("frame variable *bar", substrs=["_fooProp = 10", "_barProp = 15"])
+
+ def test_expr(self):
+ self.build()
+ lldbutil.run_to_source_breakpoint(self, "break here", lldb.SBFileSpec("main.m"))
+ self.expect_expr(
+ "*bar",
+ result_type="Bar",
+ result_children=[
+ ValueCheck(
+ name="Foo",
+ children=[
+ ValueCheck(name="NSObject"),
+ ValueCheck(name="_fooProp", value="10"),
+ ],
+ ),
+ ValueCheck(name="_barProp", value="15"),
+ ],
+ )
diff --git a/lldb/test/API/lang/objc/ivar-in-framework-base/lib.h b/lldb/test/API/lang/objc/ivar-in-framework-base/lib.h
new file mode 100644
index 0000000..31ceb53
--- /dev/null
+++ b/lldb/test/API/lang/objc/ivar-in-framework-base/lib.h
@@ -0,0 +1,6 @@
+#import <Foundation/Foundation.h>
+
+@interface Foo : NSObject
+@property int fooProp;
+- (id)init;
+@end
diff --git a/lldb/test/API/lang/objc/ivar-in-framework-base/lib.m b/lldb/test/API/lang/objc/ivar-in-framework-base/lib.m
new file mode 100644
index 0000000..e1bf80a
--- /dev/null
+++ b/lldb/test/API/lang/objc/ivar-in-framework-base/lib.m
@@ -0,0 +1,8 @@
+#import "lib.h"
+
+@implementation Foo
+- (id)init {
+ self.fooProp = 10;
+ return self;
+}
+@end
diff --git a/lldb/test/API/lang/objc/ivar-in-framework-base/main.m b/lldb/test/API/lang/objc/ivar-in-framework-base/main.m
new file mode 100644
index 0000000..1fd352e
--- /dev/null
+++ b/lldb/test/API/lang/objc/ivar-in-framework-base/main.m
@@ -0,0 +1,22 @@
+#import "lib.h"
+#include <stdio.h>
+
+@interface Bar : Foo
+@property int barProp;
+- (id)init;
+@end
+
+@implementation Bar
+
+- (id)init {
+ self = [super init];
+ self.barProp = 15;
+ return self;
+}
+@end
+
+int main() {
+ Bar *bar = [Bar new];
+ puts("break here");
+ return 0;
+}
diff --git a/llvm/docs/AMDGPUUsage.rst b/llvm/docs/AMDGPUUsage.rst
index 8193adc..e062032 100644
--- a/llvm/docs/AMDGPUUsage.rst
+++ b/llvm/docs/AMDGPUUsage.rst
@@ -883,6 +883,8 @@ supported for the ``amdgcn`` target.
Buffer Fat Pointer 7 N/A N/A 160 0
Buffer Resource 8 N/A V# 128 0x00000000000000000000000000000000
Buffer Strided Pointer (experimental) 9 *TODO*
+ *reserved for downstream use* 10
+ *reserved for downstream use* 11
Streamout Registers 128 N/A GS_REGS
===================================== =============== =========== ================ ======= ============================
diff --git a/llvm/docs/CodeOfConduct.rst b/llvm/docs/CodeOfConduct.rst
index 645ae12..995d32b 100644
--- a/llvm/docs/CodeOfConduct.rst
+++ b/llvm/docs/CodeOfConduct.rst
@@ -171,6 +171,7 @@ The current committee members are:
Transparency Reports
====================
+* `July 15, 2025 <https://discourse.llvm.org/t/llvm-code-of-conduct-transparency-report-july-15-2024-july-15-2025/88622>`_
* `July 15, 2024 <https://discourse.llvm.org/t/llvm-code-of-conduct-transparency-report-july-15-2023-july-15-2024/82687>`_
* `July 15, 2023 <https://llvm.org/coc-reports/2023-07-15-report.html>`_
* `July 15, 2022 <https://llvm.org/coc-reports/2022-07-15-report.html>`_
diff --git a/llvm/docs/CommandGuide/dsymutil.rst b/llvm/docs/CommandGuide/dsymutil.rst
index 8764e1f..8e61e01 100644
--- a/llvm/docs/CommandGuide/dsymutil.rst
+++ b/llvm/docs/CommandGuide/dsymutil.rst
@@ -75,14 +75,6 @@ OPTIONS
Make a static variable keep the enclosing function even if it would have been
omitted otherwise.
-.. option:: --minimize, -z
-
- When used when creating a dSYM file, this option will suppress the emission of
- the .debug_inlines, .debug_pubnames, and .debug_pubtypes sections since
- dsymutil currently has better equivalents: .apple_names and .apple_types. When
- used in conjunction with ``--update`` option, this option will cause redundant
- accelerator tables to be removed.
-
.. option:: --no-object-timestamp
Don't check timestamp for object files.
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 0c54f57..5b4b53d 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -21062,12 +21062,15 @@ integer element type.
Syntax:
"""""""
-This is an overloaded intrinsic.
+This is an overloaded intrinsic. You can use ``llvm.matrix.column.major.load``
+to load any vector type with a stride of any bitwidth up to 64.
::
- declare vectorty @llvm.matrix.column.major.load.*(
+ declare <4 x i32> @llvm.matrix.column.major.load.v4i32.i64(
ptrty %Ptr, i64 %Stride, i1 <IsVolatile>, i32 <Rows>, i32 <Cols>)
+ declare <9 x double> @llvm.matrix.column.major.load.v9f64.i32(
+ ptrty %Ptr, i32 %Stride, i1 <IsVolatile>, i32 <Rows>, i32 <Cols>)
Overview:
"""""""""
@@ -21086,9 +21089,9 @@ Arguments:
The first argument ``%Ptr`` is a pointer type to the returned vector type, and
corresponds to the start address to load from. The second argument ``%Stride``
-is a positive, constant integer with ``%Stride >= <Rows>``. ``%Stride`` is used
-to compute the column memory addresses. I.e., for a column ``C``, its start
-memory addresses is calculated with ``%Ptr + C * %Stride``. The third Argument
+is a positive integer for which ``%Stride >= <Rows>``. ``%Stride`` is used to
+compute the column memory addresses. I.e., for a column ``C``, its start memory
+address is calculated with ``%Ptr + C * %Stride``. The third argument
``<IsVolatile>`` is a boolean value. The fourth and fifth arguments,
``<Rows>`` and ``<Cols>``, correspond to the number of rows and columns,
respectively, and must be positive, constant integers. The returned vector must
@@ -21103,11 +21106,17 @@ The :ref:`align <attr_align>` parameter attribute can be provided for the
Syntax:
"""""""
+This is an overloaded intrinsic. You can use ``llvm.matrix.column.major.store``
+to store any vector type with a stride of any bitwidth up to 64.
::
- declare void @llvm.matrix.column.major.store.*(
- vectorty %In, ptrty %Ptr, i64 %Stride, i1 <IsVolatile>, i32 <Rows>, i32 <Cols>)
+ declare void @llvm.matrix.column.major.store.v4i32.i64(
+ <4 x i32> %In, ptrty %Ptr, i64 %Stride, i1 <IsVolatile>, i32 <Rows>,
+ i32 <Cols>)
+ declare void @llvm.matrix.column.major.store.v9f64.i32(
+ <9 x double> %In, ptrty %Ptr, i32 %Stride, i1 <IsVolatile>, i32
+ <Rows>, i32 <Cols>)
Overview:
"""""""""
@@ -21127,7 +21136,7 @@ Arguments:
The first argument ``%In`` is a vector that corresponds to a ``<Rows> x
<Cols>`` matrix to be stored to memory. The second argument ``%Ptr`` is a
pointer to the vector type of ``%In``, and is the start address of the matrix
-in memory. The third argument ``%Stride`` is a positive, constant integer with
+in memory. The third argument ``%Stride`` is a positive integer for which
``%Stride >= <Rows>``. ``%Stride`` is used to compute the column memory
addresses. I.e., for a column ``C``, its start memory addresses is calculated
with ``%Ptr + C * %Stride``. The fourth argument ``<IsVolatile>`` is a boolean
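
As a hedged illustration (not from the patch), the sketch below assumes Clang's matrix extension (compiled with -fenable-matrix); the builtins shown lower to the llvm.matrix.column.major.load/store intrinsics documented above, with the stride passed as the last argument.

typedef double m4x4_t __attribute__((matrix_type(4, 4)));

// Loads a 4x4 column-major matrix whose columns are 6 doubles apart
// (stride 6 >= 4 rows), e.g. a tile of a larger matrix.
m4x4_t load_tile(double *Ptr) {
  return __builtin_matrix_column_major_load(Ptr, 4, 4, 6);
}

// Stores the matrix densely packed: the stride equals the number of rows.
void store_packed(m4x4_t M, double *Ptr) {
  __builtin_matrix_column_major_store(M, Ptr, 4);
}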
diff --git a/llvm/docs/SPIRVUsage.rst b/llvm/docs/SPIRVUsage.rst
index d2d6646..85eeabf 100644
--- a/llvm/docs/SPIRVUsage.rst
+++ b/llvm/docs/SPIRVUsage.rst
@@ -235,6 +235,8 @@ Below is a list of supported SPIR-V extensions, sorted alphabetically by their e
- Adds execution modes and decorations to control floating-point computations in both kernels and shaders. It can be used on whole modules and individual instructions.
* - ``SPV_INTEL_predicated_io``
- Adds predicated load and store instructions that conditionally read from or write to memory based on a boolean predicate.
+ * - ``SPV_KHR_maximal_reconvergence``
+ - Adds execution mode and capability to enable maximal reconvergence.
SPIR-V representation in LLVM IR
================================
diff --git a/llvm/docs/TableGen/BackEnds.rst b/llvm/docs/TableGen/BackEnds.rst
index 14232bc..7f57137 100644
--- a/llvm/docs/TableGen/BackEnds.rst
+++ b/llvm/docs/TableGen/BackEnds.rst
@@ -48,7 +48,7 @@ the TableGen files, the back-ends and their users.
For instance, a global contract is that each back-end produces macro-guarded
sections. Based on whether the file is included by a header or a source file,
or even in which context of each file the include is being used, you have
-todefine a macro just before including it, to get the right output:
+to define a macro just before including it, to get the right output:
.. code-block:: c++
@@ -80,8 +80,8 @@ in the TableGen files.
CodeEmitter
-----------
-**Purpose**: CodeEmitterGen uses the descriptions of instructions and their fields to
-construct an automated code emitter: a function that, given a MachineInstr,
+**Purpose**: ``CodeEmitterGen`` uses the descriptions of instructions and their fields to
+construct an automated code emitter: a function that, given a ``MachineInstr``,
returns the (currently, 32-bit unsigned) value of the instruction.
**Output**: C++ code, implementing the target's CodeEmitter
@@ -130,7 +130,7 @@ AsmMatcher
----------
**Purpose**: Emits a target specifier matcher for
-converting parsed assembly operands in the MCInst structures. It also
+converting parsed assembly operands in the ``MCInst`` structures. It also
emits a matcher for custom operand parsing. Extensive documentation is
written on the ``AsmMatcherEmitter.cpp`` file.
@@ -167,7 +167,7 @@ CallingConv
conventions supported by this target.
**Output**: Implement static functions to deal with calling conventions
-chained by matching styles, returning false on no match.
+chained by matching styles, returning ``false`` on no match.
**Usage**: Used in ISelLowering and FastIsel as function pointers to
implementation returned by a CC selection function.
@@ -200,7 +200,7 @@ FastISel
**Purpose**: This tablegen backend emits code for use by the "fast"
instruction selection algorithm. See the comments at the top of
-lib/CodeGen/SelectionDAG/FastISel.cpp for background. This file
+``lib/CodeGen/SelectionDAG/FastISel.cpp`` for background. This file
scans through the target's tablegen instruction-info files
and extracts instructions with obvious-looking patterns, and it emits
code to look up these instructions by type and operator.
@@ -270,23 +270,23 @@ This file is included as part of ``Attr.h``.
ClangAttrParserStringSwitches
-----------------------------
-**Purpose**: Creates AttrParserStringSwitches.inc, which contains
-StringSwitch::Case statements for parser-related string switches. Each switch
+**Purpose**: Creates ``AttrParserStringSwitches.inc``, which contains
+``StringSwitch::Case`` statements for parser-related string switches. Each switch
is given its own macro (such as ``CLANG_ATTR_ARG_CONTEXT_LIST``, or
``CLANG_ATTR_IDENTIFIER_ARG_LIST``), which is expected to be defined before
-including AttrParserStringSwitches.inc, and undefined after.
+including ``AttrParserStringSwitches.inc``, and undefined after.
ClangAttrImpl
-------------
-**Purpose**: Creates AttrImpl.inc, which contains semantic attribute class
+**Purpose**: Creates ``AttrImpl.inc``, which contains semantic attribute class
definitions for any attribute in ``Attr.td`` that has not set ``ASTNode = 0``.
This file is included as part of ``AttrImpl.cpp``.
ClangAttrList
-------------
-**Purpose**: Creates AttrList.inc, which is used when a list of semantic
+**Purpose**: Creates ``AttrList.inc``, which is used when a list of semantic
attribute identifiers is required. For instance, ``AttrKinds.h`` includes this
file to generate the list of ``attr::Kind`` enumeration values. This list is
separated out into multiple categories: attributes, inheritable attributes, and
@@ -297,25 +297,25 @@ functionality required for ``dyn_cast`` and similar APIs.
ClangAttrPCHRead
----------------
-**Purpose**: Creates AttrPCHRead.inc, which is used to deserialize attributes
+**Purpose**: Creates ``AttrPCHRead.inc``, which is used to deserialize attributes
in the ``ASTReader::ReadAttributes`` function.
ClangAttrPCHWrite
-----------------
-**Purpose**: Creates AttrPCHWrite.inc, which is used to serialize attributes in
+**Purpose**: Creates ``AttrPCHWrite.inc``, which is used to serialize attributes in
the ``ASTWriter::WriteAttributes`` function.
ClangAttrSpellings
---------------------
-**Purpose**: Creates AttrSpellings.inc, which is used to implement the
+**Purpose**: Creates ``AttrSpellings.inc``, which is used to implement the
``__has_attribute`` feature test macro.
ClangAttrSpellingListIndex
--------------------------
-**Purpose**: Creates AttrSpellingListIndex.inc, which is used to map parsed
+**Purpose**: Creates ``AttrSpellingListIndex.inc``, which is used to map parsed
attribute spellings (including which syntax or scope was used) to an attribute
spelling list index. These spelling list index values are internal
implementation details exposed via
@@ -324,26 +324,26 @@ implementation details exposed via
ClangAttrVisitor
-------------------
-**Purpose**: Creates AttrVisitor.inc, which is used when implementing
+**Purpose**: Creates ``AttrVisitor.inc``, which is used when implementing
recursive AST visitors.
ClangAttrTemplateInstantiate
----------------------------
-**Purpose**: Creates AttrTemplateInstantiate.inc, which implements the
+**Purpose**: Creates ``AttrTemplateInstantiate.inc``, which implements the
``instantiateTemplateAttribute`` function, used when instantiating a template
that requires an attribute to be cloned.
ClangAttrParsedAttrList
-----------------------
-**Purpose**: Creates AttrParsedAttrList.inc, which is used to generate the
+**Purpose**: Creates ``AttrParsedAttrList.inc``, which is used to generate the
``AttributeList::Kind`` parsed attribute enumeration.
ClangAttrParsedAttrImpl
-----------------------
-**Purpose**: Creates AttrParsedAttrImpl.inc, which is used by
+**Purpose**: Creates ``AttrParsedAttrImpl.inc``, which is used by
``AttributeList.cpp`` to implement several functions on the ``AttributeList``
class. This functionality is implemented via the ``AttrInfoMap ParsedAttrInfo``
array, which contains one element per parsed attribute object.
@@ -351,14 +351,14 @@ array, which contains one element per parsed attribute object.
ClangAttrParsedAttrKinds
------------------------
-**Purpose**: Creates AttrParsedAttrKinds.inc, which is used to implement the
+**Purpose**: Creates ``AttrParsedAttrKinds.inc``, which is used to implement the
``AttributeList::getKind`` function, mapping a string (and syntax) to a parsed
attribute ``AttributeList::Kind`` enumeration.
ClangAttrDump
-------------
-**Purpose**: Creates AttrDump.inc, which dumps information about an attribute.
+**Purpose**: Creates ``AttrDump.inc``, which dumps information about an attribute.
It is used to implement ``ASTDumper::dumpAttr``.
ClangDiagsDefs
@@ -424,7 +424,7 @@ Generate list of commands that are used in documentation comments.
ArmNeon
-------
-Generate arm_neon.h for clang.
+Generate ``arm_neon.h`` for clang.
ArmNeonSema
-----------
@@ -473,7 +473,7 @@ to a built-in backend.
**Output**:
-The root of the output file is a JSON object (i.e. dictionary),
+The root of the output file is a JSON object (i.e., dictionary),
containing the following fixed keys:
* ``!tablegen_json_version``: a numeric version field that will
@@ -520,7 +520,7 @@ conventions described below.
Some TableGen data types are translated directly into the
corresponding JSON type:
-* A completely undefined value (e.g. for a variable declared without
+* A completely undefined value (e.g., for a variable declared without
initializer in some superclass of this record, and never initialized
by the record itself or any other superclass) is emitted as the JSON
``null`` value.
@@ -964,7 +964,7 @@ Here is the modified lookup function.
The new lookup function will return an iterator range with first pointer to the
first result and the last pointer to the last matching result from the table.
-However, please note that the support for emitting modified definition exists
+However, please note that the support for emitting a modified definition exists
for ``PrimaryKeyName`` only.
The ``PrimaryKeyEarlyOut`` field, when set to 1, modifies the lookup
diff --git a/llvm/include/llvm-c/DebugInfo.h b/llvm/include/llvm-c/DebugInfo.h
index 2ecd69a..70da3a6 100644
--- a/llvm/include/llvm-c/DebugInfo.h
+++ b/llvm/include/llvm-c/DebugInfo.h
@@ -204,6 +204,11 @@ enum {
typedef unsigned LLVMMetadataKind;
/**
+ * The kind of checksum to emit.
+ */
+typedef enum { CSK_MD5, CSK_SHA1, CSK_SHA256 } LLVMChecksumKind;
+
+/**
* An LLVM DWARF type encoding.
*/
typedef unsigned LLVMDWARFTypeEncoding;
@@ -327,6 +332,25 @@ LLVM_C_ABI LLVMMetadataRef LLVMDIBuilderCreateFile(LLVMDIBuilderRef Builder,
size_t DirectoryLen);
/**
+ * Create a file descriptor to hold debugging information for a file.
+ * \param Builder The \c DIBuilder.
+ * \param Filename File name.
+ * \param FilenameLen The length of the C string passed to \c Filename.
+ * \param Directory Directory.
+ * \param DirectoryLen The length of the C string passed to \c Directory.
+ * \param ChecksumKind The kind of checksum, e.g. MD5 or SHA256.
+ * \param Checksum The checksum.
+ * \param ChecksumLen The length of the checksum.
+ * \param Source The embedded source.
+ * \param SourceLen The length of the source.
+ */
+LLVM_C_ABI LLVMMetadataRef LLVMDIBuilderCreateFileWithChecksum(
+ LLVMDIBuilderRef Builder, const char *Filename, size_t FilenameLen,
+ const char *Directory, size_t DirectoryLen, LLVMChecksumKind ChecksumKind,
+ const char *Checksum, size_t ChecksumLen, const char *Source,
+ size_t SourceLen);
+
+/**
* Creates a new descriptor for a module with the specified parent scope.
* \param Builder The \c DIBuilder.
* \param ParentScope The parent scope containing this module declaration.
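
A hypothetical caller of the new entry point might look like the sketch below; DIB is assumed to be a valid LLVMDIBuilderRef and the digest string is a placeholder, but the call matches the declaration added above.

#include <llvm-c/DebugInfo.h>
#include <string.h>

LLVMMetadataRef makeCheckedFile(LLVMDIBuilderRef DIB) {
  const char *Name = "main.c";
  const char *Dir = "/src";
  const char *MD5 = "d41d8cd98f00b204e9800998ecf8427e"; // placeholder MD5 digest
  // Source/SourceLen are optional; pass NULL/0 when no embedded source exists.
  return LLVMDIBuilderCreateFileWithChecksum(DIB, Name, strlen(Name), Dir, strlen(Dir),
                                             CSK_MD5, MD5, strlen(MD5),
                                             /*Source=*/NULL, /*SourceLen=*/0);
}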
diff --git a/llvm/include/llvm/ADT/APFloat.h b/llvm/include/llvm/ADT/APFloat.h
index a1bfce7..bccdb89 100644
--- a/llvm/include/llvm/ADT/APFloat.h
+++ b/llvm/include/llvm/ADT/APFloat.h
@@ -138,10 +138,16 @@ enum lostFraction { // Example of truncated bits:
/// New operations: sqrt, IEEE remainder, C90 fmod, nexttoward.
///
+namespace detail {
+class IEEEFloat;
+class DoubleAPFloat;
+} // namespace detail
+
// This is the common type definitions shared by APFloat and its internal
// implementation classes. This struct should not define any non-static data
// members.
-struct APFloatBase {
+class APFloatBase {
+public:
typedef APInt::WordType integerPart;
static constexpr unsigned integerPartWidth = APInt::APINT_BITS_PER_WORD;
@@ -257,30 +263,64 @@ struct APFloatBase {
LLVM_ABI static const llvm::fltSemantics &EnumToSemantics(Semantics S);
LLVM_ABI static Semantics SemanticsToEnum(const llvm::fltSemantics &Sem);
- LLVM_ABI static const fltSemantics &IEEEhalf() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &BFloat() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &IEEEsingle() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &IEEEdouble() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &IEEEquad() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &PPCDoubleDouble() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &PPCDoubleDoubleLegacy() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &Float8E5M2() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &Float8E5M2FNUZ() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &Float8E4M3() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &Float8E4M3FN() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &Float8E4M3FNUZ() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &Float8E4M3B11FNUZ() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &Float8E3M4() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &FloatTF32() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &Float8E8M0FNU() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &Float6E3M2FN() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &Float6E2M3FN() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &Float4E2M1FN() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &x87DoubleExtended() LLVM_READNONE;
+private:
+ LLVM_ABI static const fltSemantics semIEEEhalf;
+ LLVM_ABI static const fltSemantics semBFloat;
+ LLVM_ABI static const fltSemantics semIEEEsingle;
+ LLVM_ABI static const fltSemantics semIEEEdouble;
+ LLVM_ABI static const fltSemantics semIEEEquad;
+ LLVM_ABI static const fltSemantics semFloat8E5M2;
+ LLVM_ABI static const fltSemantics semFloat8E5M2FNUZ;
+ LLVM_ABI static const fltSemantics semFloat8E4M3;
+ LLVM_ABI static const fltSemantics semFloat8E4M3FN;
+ LLVM_ABI static const fltSemantics semFloat8E4M3FNUZ;
+ LLVM_ABI static const fltSemantics semFloat8E4M3B11FNUZ;
+ LLVM_ABI static const fltSemantics semFloat8E3M4;
+ LLVM_ABI static const fltSemantics semFloatTF32;
+ LLVM_ABI static const fltSemantics semFloat8E8M0FNU;
+ LLVM_ABI static const fltSemantics semFloat6E3M2FN;
+ LLVM_ABI static const fltSemantics semFloat6E2M3FN;
+ LLVM_ABI static const fltSemantics semFloat4E2M1FN;
+ LLVM_ABI static const fltSemantics semX87DoubleExtended;
+ LLVM_ABI static const fltSemantics semBogus;
+ LLVM_ABI static const fltSemantics semPPCDoubleDouble;
+ LLVM_ABI static const fltSemantics semPPCDoubleDoubleLegacy;
+
+ friend class detail::IEEEFloat;
+ friend class detail::DoubleAPFloat;
+ friend class APFloat;
+
+public:
+ static const fltSemantics &IEEEhalf() { return semIEEEhalf; }
+ static const fltSemantics &BFloat() { return semBFloat; }
+ static const fltSemantics &IEEEsingle() { return semIEEEsingle; }
+ static const fltSemantics &IEEEdouble() { return semIEEEdouble; }
+ static const fltSemantics &IEEEquad() { return semIEEEquad; }
+ static const fltSemantics &PPCDoubleDouble() { return semPPCDoubleDouble; }
+ static const fltSemantics &PPCDoubleDoubleLegacy() {
+ return semPPCDoubleDoubleLegacy;
+ }
+ static const fltSemantics &Float8E5M2() { return semFloat8E5M2; }
+ static const fltSemantics &Float8E5M2FNUZ() { return semFloat8E5M2FNUZ; }
+ static const fltSemantics &Float8E4M3() { return semFloat8E4M3; }
+ static const fltSemantics &Float8E4M3FN() { return semFloat8E4M3FN; }
+ static const fltSemantics &Float8E4M3FNUZ() { return semFloat8E4M3FNUZ; }
+ static const fltSemantics &Float8E4M3B11FNUZ() {
+ return semFloat8E4M3B11FNUZ;
+ }
+ static const fltSemantics &Float8E3M4() { return semFloat8E3M4; }
+ static const fltSemantics &FloatTF32() { return semFloatTF32; }
+ static const fltSemantics &Float8E8M0FNU() { return semFloat8E8M0FNU; }
+ static const fltSemantics &Float6E3M2FN() { return semFloat6E3M2FN; }
+ static const fltSemantics &Float6E2M3FN() { return semFloat6E2M3FN; }
+ static const fltSemantics &Float4E2M1FN() { return semFloat4E2M1FN; }
+ static const fltSemantics &x87DoubleExtended() {
+ return semX87DoubleExtended;
+ }
/// A Pseudo fltsemantic used to construct APFloats that cannot conflict with
/// anything real.
- LLVM_ABI static const fltSemantics &Bogus() LLVM_READNONE;
+ static const fltSemantics &Bogus() { return semBogus; }
// Returns true if any number described by this semantics can be precisely
// represented by the specified semantics. Does not take into account
@@ -927,69 +967,11 @@ class APFloat : public APFloatBase {
llvm_unreachable("Unexpected semantics");
}
- ~Storage() {
- if (usesLayout<IEEEFloat>(*semantics)) {
- IEEE.~IEEEFloat();
- return;
- }
- if (usesLayout<DoubleAPFloat>(*semantics)) {
- Double.~DoubleAPFloat();
- return;
- }
- llvm_unreachable("Unexpected semantics");
- }
-
- Storage(const Storage &RHS) {
- if (usesLayout<IEEEFloat>(*RHS.semantics)) {
- new (this) IEEEFloat(RHS.IEEE);
- return;
- }
- if (usesLayout<DoubleAPFloat>(*RHS.semantics)) {
- new (this) DoubleAPFloat(RHS.Double);
- return;
- }
- llvm_unreachable("Unexpected semantics");
- }
-
- Storage(Storage &&RHS) {
- if (usesLayout<IEEEFloat>(*RHS.semantics)) {
- new (this) IEEEFloat(std::move(RHS.IEEE));
- return;
- }
- if (usesLayout<DoubleAPFloat>(*RHS.semantics)) {
- new (this) DoubleAPFloat(std::move(RHS.Double));
- return;
- }
- llvm_unreachable("Unexpected semantics");
- }
-
- Storage &operator=(const Storage &RHS) {
- if (usesLayout<IEEEFloat>(*semantics) &&
- usesLayout<IEEEFloat>(*RHS.semantics)) {
- IEEE = RHS.IEEE;
- } else if (usesLayout<DoubleAPFloat>(*semantics) &&
- usesLayout<DoubleAPFloat>(*RHS.semantics)) {
- Double = RHS.Double;
- } else if (this != &RHS) {
- this->~Storage();
- new (this) Storage(RHS);
- }
- return *this;
- }
-
- Storage &operator=(Storage &&RHS) {
- if (usesLayout<IEEEFloat>(*semantics) &&
- usesLayout<IEEEFloat>(*RHS.semantics)) {
- IEEE = std::move(RHS.IEEE);
- } else if (usesLayout<DoubleAPFloat>(*semantics) &&
- usesLayout<DoubleAPFloat>(*RHS.semantics)) {
- Double = std::move(RHS.Double);
- } else if (this != &RHS) {
- this->~Storage();
- new (this) Storage(std::move(RHS));
- }
- return *this;
- }
+ LLVM_ABI ~Storage();
+ LLVM_ABI Storage(const Storage &RHS);
+ LLVM_ABI Storage(Storage &&RHS);
+ LLVM_ABI Storage &operator=(const Storage &RHS);
+ LLVM_ABI Storage &operator=(Storage &&RHS);
} U;
template <typename T> static bool usesLayout(const fltSemantics &Semantics) {
diff --git a/llvm/include/llvm/ADT/DenseMap.h b/llvm/include/llvm/ADT/DenseMap.h
index 4bda50f..25b5262 100644
--- a/llvm/include/llvm/ADT/DenseMap.h
+++ b/llvm/include/llvm/ADT/DenseMap.h
@@ -42,7 +42,7 @@ namespace detail {
// We extend a pair to allow users to override the bucket type with their own
// implementation without requiring two members.
template <typename KeyT, typename ValueT>
-struct DenseMapPair : public std::pair<KeyT, ValueT> {
+struct DenseMapPair : std::pair<KeyT, ValueT> {
using std::pair<KeyT, ValueT>::pair;
KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
diff --git a/llvm/include/llvm/ADT/DepthFirstIterator.h b/llvm/include/llvm/ADT/DepthFirstIterator.h
index 4ced758..3c54f32 100644
--- a/llvm/include/llvm/ADT/DepthFirstIterator.h
+++ b/llvm/include/llvm/ADT/DepthFirstIterator.h
@@ -66,8 +66,8 @@ public:
// one more method, completed, which is invoked when all children of a
// node have been processed. It is intended to distinguish of back and
// cross edges in the spanning tree but is not used in the common case.
-template <typename NodeRef, unsigned SmallSize=8>
-struct df_iterator_default_set : public SmallPtrSet<NodeRef, SmallSize> {
+template <typename NodeRef, unsigned SmallSize = 8>
+struct df_iterator_default_set : SmallPtrSet<NodeRef, SmallSize> {
using BaseSet = SmallPtrSet<NodeRef, SmallSize>;
using iterator = typename BaseSet::iterator;
@@ -235,8 +235,10 @@ iterator_range<df_iterator<T>> depth_first(const T& G) {
}
// Provide global definitions of external depth first iterators...
-template <class T, class SetTy = df_iterator_default_set<typename GraphTraits<T>::NodeRef>>
-struct df_ext_iterator : public df_iterator<T, SetTy, true> {
+template <class T,
+ class SetTy =
+ df_iterator_default_set<typename GraphTraits<T>::NodeRef>>
+struct df_ext_iterator : df_iterator<T, SetTy, true> {
df_ext_iterator(const df_iterator<T, SetTy, true> &V)
: df_iterator<T, SetTy, true>(V) {}
};
@@ -262,7 +264,7 @@ template <class T,
class SetTy =
df_iterator_default_set<typename GraphTraits<T>::NodeRef>,
bool External = false>
-struct idf_iterator : public df_iterator<Inverse<T>, SetTy, External> {
+struct idf_iterator : df_iterator<Inverse<T>, SetTy, External> {
idf_iterator(const df_iterator<Inverse<T>, SetTy, External> &V)
: df_iterator<Inverse<T>, SetTy, External>(V) {}
};
@@ -284,8 +286,10 @@ iterator_range<idf_iterator<T>> inverse_depth_first(const T& G) {
}
// Provide global definitions of external inverse depth first iterators...
-template <class T, class SetTy = df_iterator_default_set<typename GraphTraits<T>::NodeRef>>
-struct idf_ext_iterator : public idf_iterator<T, SetTy, true> {
+template <class T,
+ class SetTy =
+ df_iterator_default_set<typename GraphTraits<T>::NodeRef>>
+struct idf_ext_iterator : idf_iterator<T, SetTy, true> {
idf_ext_iterator(const idf_iterator<T, SetTy, true> &V)
: idf_iterator<T, SetTy, true>(V) {}
idf_ext_iterator(const df_iterator<Inverse<T>, SetTy, true> &V)
diff --git a/llvm/include/llvm/ADT/ImmutableSet.h b/llvm/include/llvm/ADT/ImmutableSet.h
index 310539f..8b2425e 100644
--- a/llvm/include/llvm/ADT/ImmutableSet.h
+++ b/llvm/include/llvm/ADT/ImmutableSet.h
@@ -931,8 +931,7 @@ struct ImutProfileInfo<T*> {
/// ImutContainerInfo - Generic definition of comparison operations for
/// elements of immutable containers that defaults to using
/// std::equal_to<> and std::less<> to perform comparison of elements.
-template <typename T>
-struct ImutContainerInfo : public ImutProfileInfo<T> {
+template <typename T> struct ImutContainerInfo : ImutProfileInfo<T> {
using value_type = typename ImutProfileInfo<T>::value_type;
using value_type_ref = typename ImutProfileInfo<T>::value_type_ref;
using key_type = value_type;
@@ -957,8 +956,7 @@ struct ImutContainerInfo : public ImutProfileInfo<T> {
/// ImutContainerInfo - Specialization for pointer values to treat pointers
/// as references to unique objects. Pointers are thus compared by
/// their addresses.
-template <typename T>
-struct ImutContainerInfo<T*> : public ImutProfileInfo<T*> {
+template <typename T> struct ImutContainerInfo<T *> : ImutProfileInfo<T *> {
using value_type = typename ImutProfileInfo<T*>::value_type;
using value_type_ref = typename ImutProfileInfo<T*>::value_type_ref;
using key_type = value_type;
diff --git a/llvm/include/llvm/ADT/PostOrderIterator.h b/llvm/include/llvm/ADT/PostOrderIterator.h
index 1cbd3c1..d9aa452 100644
--- a/llvm/include/llvm/ADT/PostOrderIterator.h
+++ b/llvm/include/llvm/ADT/PostOrderIterator.h
@@ -200,7 +200,7 @@ template <class T> iterator_range<po_iterator<T>> post_order(const T &G) {
// Provide global definitions of external postorder iterators...
template <class T, class SetType = std::set<typename GraphTraits<T>::NodeRef>>
-struct po_ext_iterator : public po_iterator<T, SetType, true> {
+struct po_ext_iterator : po_iterator<T, SetType, true> {
po_ext_iterator(const po_iterator<T, SetType, true> &V) :
po_iterator<T, SetType, true>(V) {}
};
@@ -223,7 +223,7 @@ iterator_range<po_ext_iterator<T, SetType>> post_order_ext(const T &G, SetType &
// Provide global definitions of inverse post order iterators...
template <class T, class SetType = std::set<typename GraphTraits<T>::NodeRef>,
bool External = false>
-struct ipo_iterator : public po_iterator<Inverse<T>, SetType, External> {
+struct ipo_iterator : po_iterator<Inverse<T>, SetType, External> {
ipo_iterator(const po_iterator<Inverse<T>, SetType, External> &V) :
po_iterator<Inverse<T>, SetType, External> (V) {}
};
@@ -245,7 +245,7 @@ iterator_range<ipo_iterator<T>> inverse_post_order(const T &G) {
// Provide global definitions of external inverse postorder iterators...
template <class T, class SetType = std::set<typename GraphTraits<T>::NodeRef>>
-struct ipo_ext_iterator : public ipo_iterator<T, SetType, true> {
+struct ipo_ext_iterator : ipo_iterator<T, SetType, true> {
ipo_ext_iterator(const ipo_iterator<T, SetType, true> &V) :
ipo_iterator<T, SetType, true>(V) {}
ipo_ext_iterator(const po_iterator<Inverse<T>, SetType, true> &V) :
diff --git a/llvm/include/llvm/ADT/STLExtras.h b/llvm/include/llvm/ADT/STLExtras.h
index 658f262..a9841c6 100644
--- a/llvm/include/llvm/ADT/STLExtras.h
+++ b/llvm/include/llvm/ADT/STLExtras.h
@@ -674,7 +674,7 @@ using zip_traits = iterator_facade_base<
ReferenceTupleType *, ReferenceTupleType>;
template <typename ZipType, typename ReferenceTupleType, typename... Iters>
-struct zip_common : public zip_traits<ZipType, ReferenceTupleType, Iters...> {
+struct zip_common : zip_traits<ZipType, ReferenceTupleType, Iters...> {
using Base = zip_traits<ZipType, ReferenceTupleType, Iters...>;
using IndexSequence = std::index_sequence_for<Iters...>;
using value_type = typename Base::value_type;
diff --git a/llvm/include/llvm/ADT/STLForwardCompat.h b/llvm/include/llvm/ADT/STLForwardCompat.h
index da9d3ab0..273a5cf 100644
--- a/llvm/include/llvm/ADT/STLForwardCompat.h
+++ b/llvm/include/llvm/ADT/STLForwardCompat.h
@@ -26,6 +26,54 @@ namespace llvm {
// Features from C++20
//===----------------------------------------------------------------------===//
+namespace numbers {
+// clang-format off
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T e_v = T(0x1.5bf0a8b145769P+1); // (2.7182818284590452354) https://oeis.org/A001113
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T egamma_v = T(0x1.2788cfc6fb619P-1); // (.57721566490153286061) https://oeis.org/A001620
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T ln2_v = T(0x1.62e42fefa39efP-1); // (.69314718055994530942) https://oeis.org/A002162
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T ln10_v = T(0x1.26bb1bbb55516P+1); // (2.3025850929940456840) https://oeis.org/A002392
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T log2e_v = T(0x1.71547652b82feP+0); // (1.4426950408889634074)
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T log10e_v = T(0x1.bcb7b1526e50eP-2); // (.43429448190325182765)
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T pi_v = T(0x1.921fb54442d18P+1); // (3.1415926535897932385) https://oeis.org/A000796
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T inv_pi_v = T(0x1.45f306dc9c883P-2); // (.31830988618379067154) https://oeis.org/A049541
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T inv_sqrtpi_v = T(0x1.20dd750429b6dP-1); // (.56418958354775628695) https://oeis.org/A087197
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T sqrt2_v = T(0x1.6a09e667f3bcdP+0); // (1.4142135623730950488) https://oeis.org/A002193
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T inv_sqrt2_v = T(0x1.6a09e667f3bcdP-1); // (.70710678118654752440)
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T sqrt3_v = T(0x1.bb67ae8584caaP+0); // (1.7320508075688772935) https://oeis.org/A002194
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T inv_sqrt3_v = T(0x1.279a74590331cP-1); // (.57735026918962576451)
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T phi_v = T(0x1.9e3779b97f4a8P+0); // (1.6180339887498948482) https://oeis.org/A001622
+
+inline constexpr double e = e_v<double>;
+inline constexpr double egamma = egamma_v<double>;
+inline constexpr double ln2 = ln2_v<double>;
+inline constexpr double ln10 = ln10_v<double>;
+inline constexpr double log2e = log2e_v<double>;
+inline constexpr double log10e = log10e_v<double>;
+inline constexpr double pi = pi_v<double>;
+inline constexpr double inv_pi = inv_pi_v<double>;
+inline constexpr double inv_sqrtpi = inv_sqrtpi_v<double>;
+inline constexpr double sqrt2 = sqrt2_v<double>;
+inline constexpr double inv_sqrt2 = inv_sqrt2_v<double>;
+inline constexpr double sqrt3 = sqrt3_v<double>;
+inline constexpr double inv_sqrt3 = inv_sqrt3_v<double>;
+inline constexpr double phi = phi_v<double>;
+// clang-format on
+} // namespace numbers
+
template <typename T>
struct remove_cvref // NOLINT(readability-identifier-naming)
{
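
A short usage sketch (assuming the header lands as llvm/ADT/STLForwardCompat.h): the _v templates yield the constant at the precision of the requested type, mirroring C++20 std::numbers, while the plain names default to double. The helper functions are hypothetical.

#include "llvm/ADT/STLForwardCompat.h"

float circumference(float R) { return 2.0f * llvm::numbers::pi_v<float> * R; }
double nats_to_bits(double Nats) { return Nats * llvm::numbers::log2e; }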
diff --git a/llvm/include/llvm/ADT/SmallPtrSet.h b/llvm/include/llvm/ADT/SmallPtrSet.h
index f588a77..8e7c8b3 100644
--- a/llvm/include/llvm/ADT/SmallPtrSet.h
+++ b/llvm/include/llvm/ADT/SmallPtrSet.h
@@ -532,18 +532,8 @@ class SmallPtrSet : public SmallPtrSetImpl<PtrType> {
using BaseT = SmallPtrSetImpl<PtrType>;
- // A constexpr version of llvm::bit_ceil.
- // TODO: Replace this with std::bit_ceil once C++20 is available.
- static constexpr size_t RoundUpToPowerOfTwo(size_t X) {
- size_t C = 1;
- size_t CMax = C << (std::numeric_limits<size_t>::digits - 1);
- while (C < X && C < CMax)
- C <<= 1;
- return C;
- }
-
// Make sure that SmallSize is a power of two, round up if not.
- static constexpr size_t SmallSizePowTwo = RoundUpToPowerOfTwo(SmallSize);
+ static constexpr size_t SmallSizePowTwo = llvm::bit_ceil_constexpr(SmallSize);
/// SmallStorage - Fixed size storage used in 'small mode'.
const void *SmallStorage[SmallSizePowTwo];
diff --git a/llvm/include/llvm/ADT/bit.h b/llvm/include/llvm/ADT/bit.h
index 66c4f94..8b60b69 100644
--- a/llvm/include/llvm/ADT/bit.h
+++ b/llvm/include/llvm/ADT/bit.h
@@ -336,34 +336,44 @@ template <typename T> [[nodiscard]] T bit_ceil(T Value) {
return T(1) << llvm::bit_width<T>(Value - 1u);
}
-// Forward-declare rotr so that rotl can use it.
-template <typename T, typename = std::enable_if_t<std::is_unsigned_v<T>>>
-[[nodiscard]] constexpr T rotr(T V, int R);
+/// Returns the smallest integral power of two no smaller than Value if Value is
+/// nonzero. Returns 1 otherwise.
+///
+/// Ex. bit_ceil(5) == 8.
+///
+/// The return value is undefined if the input is larger than the largest power
+/// of two representable in T.
+template <typename T> [[nodiscard]] constexpr T bit_ceil_constexpr(T Value) {
+ static_assert(std::is_unsigned_v<T>,
+ "Only unsigned integral types are allowed.");
+ if (Value < 2)
+ return 1;
+ return T(1) << llvm::bit_width_constexpr<T>(Value - 1u);
+}
template <typename T, typename = std::enable_if_t<std::is_unsigned_v<T>>>
[[nodiscard]] constexpr T rotl(T V, int R) {
- unsigned N = std::numeric_limits<T>::digits;
+ constexpr unsigned N = std::numeric_limits<T>::digits;
- R = R % N;
- if (!R)
- return V;
+ static_assert(has_single_bit(N), "& (N - 1) is only valid for powers of two");
+ R = R & (N - 1);
- if (R < 0)
- return llvm::rotr(V, -R);
+ if (R == 0)
+ return V;
return (V << R) | (V >> (N - R));
}
-template <typename T, typename> [[nodiscard]] constexpr T rotr(T V, int R) {
- unsigned N = std::numeric_limits<T>::digits;
+template <typename T, typename = std::enable_if_t<std::is_unsigned_v<T>>>
+[[nodiscard]] constexpr T rotr(T V, int R) {
+ constexpr unsigned N = std::numeric_limits<T>::digits;
+
+ static_assert(has_single_bit(N), "& (N - 1) is only valid for powers of two");
+ R = R & (N - 1);
- R = R % N;
- if (!R)
+ if (R == 0)
return V;
- if (R < 0)
- return llvm::rotl(V, -R);
-
return (V >> R) | (V << (N - R));
}
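A minimal usage sketch of the two changes above, assuming the patched llvm/ADT/bit.h (the values are illustrative, not taken from a test): bit_ceil_constexpr is usable in constant expressions, which is what the SmallPtrSet change relies on, and rotl/rotr now reduce the rotate count with a power-of-two mask instead of a signed modulo.

#include "llvm/ADT/bit.h"
#include <cstdint>

// Compile-time rounding, as SmallPtrSet now does for its small storage.
static_assert(llvm::bit_ceil_constexpr(5u) == 8u);
static_assert(llvm::bit_ceil_constexpr(1u) == 1u);

// Rotates still wrap around the full type width after the masking change.
static_assert(llvm::rotl<uint8_t>(0x81, 1) == 0x03);
static_assert(llvm::rotr<uint8_t>(0x03, 1) == 0x81);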
diff --git a/llvm/include/llvm/Analysis/LoopAnalysisManager.h b/llvm/include/llvm/Analysis/LoopAnalysisManager.h
index fc69cb0..1755257 100644
--- a/llvm/include/llvm/Analysis/LoopAnalysisManager.h
+++ b/llvm/include/llvm/Analysis/LoopAnalysisManager.h
@@ -36,7 +36,6 @@ namespace llvm {
class AAResults;
class AssumptionCache;
-class BlockFrequencyInfo;
class DominatorTree;
class Function;
class Loop;
@@ -58,7 +57,6 @@ struct LoopStandardAnalysisResults {
ScalarEvolution &SE;
TargetLibraryInfo &TLI;
TargetTransformInfo &TTI;
- BlockFrequencyInfo *BFI;
MemorySSA *MSSA;
};
diff --git a/llvm/include/llvm/Analysis/ScalarEvolution.h b/llvm/include/llvm/Analysis/ScalarEvolution.h
index e5a6c8c..3d3ec14 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -1345,6 +1345,7 @@ public:
class LoopGuards {
DenseMap<const SCEV *, const SCEV *> RewriteMap;
+ SmallDenseSet<std::pair<const SCEV *, const SCEV *>> NotEqual;
bool PreserveNUW = false;
bool PreserveNSW = false;
ScalarEvolution &SE;
diff --git a/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h b/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
index 68198ec..9354eef 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
@@ -256,6 +256,18 @@ m_scev_UDiv(const Op0_t &Op0, const Op1_t &Op1) {
return m_scev_Binary<SCEVUDivExpr>(Op0, Op1);
}
+template <typename Op0_t, typename Op1_t>
+inline SCEVBinaryExpr_match<SCEVSMaxExpr, Op0_t, Op1_t>
+m_scev_SMax(const Op0_t &Op0, const Op1_t &Op1) {
+ return m_scev_Binary<SCEVSMaxExpr>(Op0, Op1);
+}
+
+template <typename Op0_t, typename Op1_t>
+inline SCEVBinaryExpr_match<SCEVMinMaxExpr, Op0_t, Op1_t>
+m_scev_MinMax(const Op0_t &Op0, const Op1_t &Op1) {
+ return m_scev_Binary<SCEVMinMaxExpr>(Op0, Op1);
+}
+
/// Match unsigned remainder pattern.
/// Matches patterns generated by getURemExpr.
template <typename Op0_t, typename Op1_t> struct SCEVURem_match {
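A hedged sketch of the two new matchers, mirroring how they are used later in this patch; the classify helper is made up for illustration, and it assumes the SCEVPatternMatch namespace is pulled in the same way ScalarEvolution.cpp does below.

#include "llvm/Analysis/ScalarEvolutionPatternMatch.h"
using namespace llvm;
using namespace llvm::SCEVPatternMatch;

static bool classify(const SCEV *Expr) {
  const APInt *C;
  const SCEV *X, *L, *R;
  // smax with a constant first operand, as in the reworked udiv fold below.
  if (match(Expr, m_scev_SMax(m_scev_APInt(C), m_SCEV(X))))
    return true;
  // Any two-operand smin/smax/umin/umax, as in the LoopGuards helper below.
  return match(Expr, m_scev_MinMax(m_SCEV(L), m_SCEV(R)));
}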
diff --git a/llvm/include/llvm/IR/DebugProgramInstruction.h b/llvm/include/llvm/IR/DebugProgramInstruction.h
index e0292c2..457c60e3b 100644
--- a/llvm/include/llvm/IR/DebugProgramInstruction.h
+++ b/llvm/include/llvm/IR/DebugProgramInstruction.h
@@ -14,7 +14,7 @@
// dbg.value(metadata i32 %foo, ...)
// %bar = void call @ext(%foo);
//
-// and all information is stored in the Value / Metadata hierachy defined
+// and all information is stored in the Value / Metadata hierarchy defined
// elsewhere in LLVM. In the "DbgRecord" design, each instruction /may/ have a
// connection with a DbgMarker, which identifies a position immediately before
// the instruction, and each DbgMarker /may/ then have connections to DbgRecords
@@ -37,7 +37,7 @@
//
// This structure separates the two concerns of the position of the debug-info
// in the function, and the Value that it refers to. It also creates a new
-// "place" in-between the Value / Metadata hierachy where we can customise
+// "place" in-between the Value / Metadata hierarchy where we can customise
// storage and allocation techniques to better suite debug-info workloads.
// NB: as of the initial prototype, none of that has actually been attempted
// yet.
@@ -162,7 +162,7 @@ public:
LLVM_ABI bool isIdenticalToWhenDefined(const DbgRecord &R) const;
/// Convert this DbgRecord back into an appropriate llvm.dbg.* intrinsic.
/// \p InsertBefore Optional position to insert this intrinsic.
- /// \returns A new llvm.dbg.* intrinsic representiung this DbgRecord.
+ /// \returns A new llvm.dbg.* intrinsic representing this DbgRecord.
LLVM_ABI DbgInfoIntrinsic *
createDebugIntrinsic(Module *M, Instruction *InsertBefore) const;
///@}
@@ -530,7 +530,7 @@ public:
LLVM_ABI void setKillAddress();
/// Check whether this kills the address component. This doesn't take into
/// account the position of the intrinsic, therefore a returned value of false
- /// does not guarentee the address is a valid location for the variable at the
+ /// does not guarantee the address is a valid location for the variable at the
/// intrinsic's position in IR.
LLVM_ABI bool isKillAddress() const;
@@ -539,7 +539,7 @@ public:
LLVM_ABI DbgVariableRecord *clone() const;
/// Convert this DbgVariableRecord back into a dbg.value intrinsic.
/// \p InsertBefore Optional position to insert this intrinsic.
- /// \returns A new dbg.value intrinsic representiung this DbgVariableRecord.
+ /// \returns A new dbg.value intrinsic representing this DbgVariableRecord.
LLVM_ABI DbgVariableIntrinsic *
createDebugIntrinsic(Module *M, Instruction *InsertBefore) const;
diff --git a/llvm/include/llvm/IR/Value.h b/llvm/include/llvm/IR/Value.h
index 04d0391..58822a0 100644
--- a/llvm/include/llvm/IR/Value.h
+++ b/llvm/include/llvm/IR/Value.h
@@ -484,8 +484,8 @@ public:
/// Remove every uses that can safely be removed.
///
/// This will remove for example uses in llvm.assume.
- /// This should be used when performing want to perform a tranformation but
- /// some Droppable uses pervent it.
+  /// This should be used when wanting to perform a transformation but
+ /// some Droppable uses prevent it.
/// This function optionally takes a filter to only remove some droppable
/// uses.
LLVM_ABI void
diff --git a/llvm/include/llvm/Support/Alignment.h b/llvm/include/llvm/Support/Alignment.h
index a4ca54e..f9d7c76 100644
--- a/llvm/include/llvm/Support/Alignment.h
+++ b/llvm/include/llvm/Support/Alignment.h
@@ -103,7 +103,7 @@ inline Align assumeAligned(uint64_t Value) {
/// This struct is a compact representation of a valid (power of two) or
/// undefined (0) alignment.
-struct MaybeAlign : public std::optional<Align> {
+struct MaybeAlign : std::optional<Align> {
private:
using UP = std::optional<Align>;
diff --git a/llvm/include/llvm/Support/Casting.h b/llvm/include/llvm/Support/Casting.h
index 2a9a149..6f6df2e 100644
--- a/llvm/include/llvm/Support/Casting.h
+++ b/llvm/include/llvm/Support/Casting.h
@@ -340,7 +340,7 @@ struct ValueFromPointerCast
/// during the cast. It's also a good example of how to implement a move-only
/// cast.
template <typename To, typename From, typename Derived = void>
-struct UniquePtrCast : public CastIsPossible<To, From *> {
+struct UniquePtrCast : CastIsPossible<To, From *> {
using Self = detail::SelfType<Derived, UniquePtrCast<To, From>>;
using CastResultType = std::unique_ptr<
std::remove_reference_t<typename cast_retty<To, From>::ret_type>>;
@@ -473,7 +473,7 @@ struct ForwardToPointerCast {
// take advantage of the cast traits whenever possible!
template <typename To, typename From, typename Enable = void>
-struct CastInfo : public CastIsPossible<To, From> {
+struct CastInfo : CastIsPossible<To, From> {
using Self = CastInfo<To, From, Enable>;
using CastReturnType = typename cast_retty<To, From>::ret_type;
@@ -536,8 +536,7 @@ struct CastInfo<To, std::unique_ptr<From>> : public UniquePtrCast<To, From> {};
/// the input is std::optional<From> that the output can be std::optional<To>.
/// If that's not the case, specialize CastInfo for your use case.
template <typename To, typename From>
-struct CastInfo<To, std::optional<From>> : public OptionalValueCast<To, From> {
-};
+struct CastInfo<To, std::optional<From>> : OptionalValueCast<To, From> {};
/// isa<X> - Return true if the parameter to the template is an instance of one
/// of the template type arguments. Used like this:
diff --git a/llvm/include/llvm/Support/CommandLine.h b/llvm/include/llvm/Support/CommandLine.h
index dd05c53..5a5f00e 100644
--- a/llvm/include/llvm/Support/CommandLine.h
+++ b/llvm/include/llvm/Support/CommandLine.h
@@ -549,7 +549,7 @@ template <class DataType> struct OptionValue;
// The default value safely does nothing. Option value printing is only
// best-effort.
template <class DataType, bool isClass>
-struct OptionValueBase : public GenericOptionValue {
+struct OptionValueBase : GenericOptionValue {
// Temporary storage for argument passing.
using WrapperType = OptionValue<DataType>;
diff --git a/llvm/include/llvm/Support/DOTGraphTraits.h b/llvm/include/llvm/Support/DOTGraphTraits.h
index ffa9abe..3b9fe00 100644
--- a/llvm/include/llvm/Support/DOTGraphTraits.h
+++ b/llvm/include/llvm/Support/DOTGraphTraits.h
@@ -162,9 +162,8 @@ public:
/// graphs are converted to 'dot' graphs. When specializing, you may inherit
/// from DefaultDOTGraphTraits if you don't need to override everything.
///
-template <typename Ty>
-struct DOTGraphTraits : public DefaultDOTGraphTraits {
- DOTGraphTraits (bool simple=false) : DefaultDOTGraphTraits (simple) {}
+template <typename Ty> struct DOTGraphTraits : DefaultDOTGraphTraits {
+ using DefaultDOTGraphTraits::DefaultDOTGraphTraits;
};
} // End llvm namespace
diff --git a/llvm/include/llvm/Support/ELFAttributes.h b/llvm/include/llvm/Support/ELFAttributes.h
index 270246f..5771a84 100644
--- a/llvm/include/llvm/Support/ELFAttributes.h
+++ b/llvm/include/llvm/Support/ELFAttributes.h
@@ -48,8 +48,6 @@ struct SubsectionAndTagToTagName {
StringRef SubsectionName;
unsigned Tag;
StringRef TagName;
- SubsectionAndTagToTagName(StringRef SN, unsigned Tg, StringRef TN)
- : SubsectionName(SN), Tag(Tg), TagName(TN) {}
};
namespace ELFAttrs {
diff --git a/llvm/include/llvm/Support/LSP/Protocol.h b/llvm/include/llvm/Support/LSP/Protocol.h
index 93b82f1..e38203a 100644
--- a/llvm/include/llvm/Support/LSP/Protocol.h
+++ b/llvm/include/llvm/Support/LSP/Protocol.h
@@ -449,7 +449,7 @@ struct ReferenceContext {
bool fromJSON(const llvm::json::Value &value, ReferenceContext &result,
llvm::json::Path path);
-struct ReferenceParams : public TextDocumentPositionParams {
+struct ReferenceParams : TextDocumentPositionParams {
ReferenceContext context;
};
diff --git a/llvm/include/llvm/Support/MD5.h b/llvm/include/llvm/Support/MD5.h
index ed29826..4ba3867 100644
--- a/llvm/include/llvm/Support/MD5.h
+++ b/llvm/include/llvm/Support/MD5.h
@@ -41,7 +41,7 @@ template <typename T> class ArrayRef;
class MD5 {
public:
- struct MD5Result : public std::array<uint8_t, 16> {
+ struct MD5Result : std::array<uint8_t, 16> {
LLVM_ABI SmallString<32> digest() const;
uint64_t low() const {
diff --git a/llvm/include/llvm/Support/MathExtras.h b/llvm/include/llvm/Support/MathExtras.h
index c2716a9..41232335 100644
--- a/llvm/include/llvm/Support/MathExtras.h
+++ b/llvm/include/llvm/Support/MathExtras.h
@@ -13,6 +13,7 @@
#ifndef LLVM_SUPPORT_MATHEXTRAS_H
#define LLVM_SUPPORT_MATHEXTRAS_H
+#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/ADT/bit.h"
#include "llvm/Support/Compiler.h"
#include <cassert>
@@ -42,38 +43,28 @@ using common_sint =
/// Mathematical constants.
namespace numbers {
-// TODO: Track C++20 std::numbers.
// clang-format off
-constexpr double e = 0x1.5bf0a8b145769P+1, // (2.7182818284590452354) https://oeis.org/A001113
- egamma = 0x1.2788cfc6fb619P-1, // (.57721566490153286061) https://oeis.org/A001620
- ln2 = 0x1.62e42fefa39efP-1, // (.69314718055994530942) https://oeis.org/A002162
- ln10 = 0x1.26bb1bbb55516P+1, // (2.3025850929940456840) https://oeis.org/A002392
- log2e = 0x1.71547652b82feP+0, // (1.4426950408889634074)
- log10e = 0x1.bcb7b1526e50eP-2, // (.43429448190325182765)
- pi = 0x1.921fb54442d18P+1, // (3.1415926535897932385) https://oeis.org/A000796
- inv_pi = 0x1.45f306dc9c883P-2, // (.31830988618379067154) https://oeis.org/A049541
- sqrtpi = 0x1.c5bf891b4ef6bP+0, // (1.7724538509055160273) https://oeis.org/A002161
- inv_sqrtpi = 0x1.20dd750429b6dP-1, // (.56418958354775628695) https://oeis.org/A087197
- sqrt2 = 0x1.6a09e667f3bcdP+0, // (1.4142135623730950488) https://oeis.org/A00219
- inv_sqrt2 = 0x1.6a09e667f3bcdP-1, // (.70710678118654752440)
- sqrt3 = 0x1.bb67ae8584caaP+0, // (1.7320508075688772935) https://oeis.org/A002194
- inv_sqrt3 = 0x1.279a74590331cP-1, // (.57735026918962576451)
- phi = 0x1.9e3779b97f4a8P+0; // (1.6180339887498948482) https://oeis.org/A001622
-constexpr float ef = 0x1.5bf0a8P+1F, // (2.71828183) https://oeis.org/A001113
- egammaf = 0x1.2788d0P-1F, // (.577215665) https://oeis.org/A001620
- ln2f = 0x1.62e430P-1F, // (.693147181) https://oeis.org/A002162
- ln10f = 0x1.26bb1cP+1F, // (2.30258509) https://oeis.org/A002392
- log2ef = 0x1.715476P+0F, // (1.44269504)
- log10ef = 0x1.bcb7b2P-2F, // (.434294482)
- pif = 0x1.921fb6P+1F, // (3.14159265) https://oeis.org/A000796
- inv_pif = 0x1.45f306P-2F, // (.318309886) https://oeis.org/A049541
- sqrtpif = 0x1.c5bf8aP+0F, // (1.77245385) https://oeis.org/A002161
- inv_sqrtpif = 0x1.20dd76P-1F, // (.564189584) https://oeis.org/A087197
- sqrt2f = 0x1.6a09e6P+0F, // (1.41421356) https://oeis.org/A002193
- inv_sqrt2f = 0x1.6a09e6P-1F, // (.707106781)
- sqrt3f = 0x1.bb67aeP+0F, // (1.73205081) https://oeis.org/A002194
- inv_sqrt3f = 0x1.279a74P-1F, // (.577350269)
- phif = 0x1.9e377aP+0F; // (1.61803399) https://oeis.org/A001622
+inline constexpr float ef = e_v<float>;
+inline constexpr float egammaf = egamma_v<float>;
+inline constexpr float ln2f = ln2_v<float>;
+inline constexpr float ln10f = ln10_v<float>;
+inline constexpr float log2ef = log2e_v<float>;
+inline constexpr float log10ef = log10e_v<float>;
+inline constexpr float pif = pi_v<float>;
+inline constexpr float inv_pif = inv_pi_v<float>;
+inline constexpr float inv_sqrtpif = inv_sqrtpi_v<float>;
+inline constexpr float sqrt2f = sqrt2_v<float>;
+inline constexpr float inv_sqrt2f = inv_sqrt2_v<float>;
+inline constexpr float sqrt3f = sqrt3_v<float>;
+inline constexpr float inv_sqrt3f = inv_sqrt3_v<float>;
+inline constexpr float phif = phi_v<float>;
+
+// sqrtpi is not in C++20 std::numbers.
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T sqrtpi_v = T(0x1.c5bf891b4ef6bP+0); // (1.7724538509055160273) https://oeis.org/A002161
+inline constexpr double sqrtpi = sqrtpi_v<double>;
+inline constexpr float sqrtpif = sqrtpi_v<float>;
+
// These string literals are taken from below:
// https://github.com/bminor/glibc/blob/8543577b04ded6d979ffcc5a818930e4d74d0645/math/math.h#L1215-L1229
constexpr const char *pis = "3.141592653589793238462643383279502884",
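A small sketch of what the rework enables, assuming the patched header: the double and float constants above are now thin aliases of the variable templates, so generic code can pick its precision through numbers::*_v<T>. The circleArea helper is made up for illustration.

#include "llvm/Support/MathExtras.h"

template <typename T> constexpr T circleArea(T Radius) {
  return llvm::numbers::pi_v<T> * Radius * Radius;
}

constexpr double AreaD = circleArea(2.0);  // pi_v<double>, i.e. numbers::pi
constexpr float AreaF = circleArea(2.0f);  // pi_v<float>,  i.e. numbers::pif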
diff --git a/llvm/include/llvm/Support/Timer.h b/llvm/include/llvm/Support/Timer.h
index 40709d4..a4ed712 100644
--- a/llvm/include/llvm/Support/Timer.h
+++ b/llvm/include/llvm/Support/Timer.h
@@ -167,7 +167,7 @@ public:
/// you to declare a new timer, AND specify the region to time, all in one
/// statement. All timers with the same name are merged. This is primarily
/// used for debugging and for hunting performance problems.
-struct NamedRegionTimer : public TimeRegion {
+struct NamedRegionTimer : TimeRegion {
LLVM_ABI explicit NamedRegionTimer(StringRef Name, StringRef Description,
StringRef GroupName,
StringRef GroupDescription,
diff --git a/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h b/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h
index 750f954..1842d2d 100644
--- a/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h
+++ b/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h
@@ -404,10 +404,8 @@ public:
explicit FunctionToLoopPassAdaptor(std::unique_ptr<PassConceptT> Pass,
bool UseMemorySSA = false,
- bool UseBlockFrequencyInfo = false,
bool LoopNestMode = false)
: Pass(std::move(Pass)), UseMemorySSA(UseMemorySSA),
- UseBlockFrequencyInfo(UseBlockFrequencyInfo),
LoopNestMode(LoopNestMode) {
LoopCanonicalizationFPM.addPass(LoopSimplifyPass());
LoopCanonicalizationFPM.addPass(LCSSAPass());
@@ -429,7 +427,6 @@ private:
FunctionPassManager LoopCanonicalizationFPM;
bool UseMemorySSA = false;
- bool UseBlockFrequencyInfo = false;
const bool LoopNestMode;
};
@@ -442,8 +439,7 @@ private:
/// \c LoopPassManager and the returned adaptor will be in loop-nest mode.
template <typename LoopPassT>
inline FunctionToLoopPassAdaptor
-createFunctionToLoopPassAdaptor(LoopPassT &&Pass, bool UseMemorySSA = false,
- bool UseBlockFrequencyInfo = false) {
+createFunctionToLoopPassAdaptor(LoopPassT &&Pass, bool UseMemorySSA = false) {
if constexpr (is_detected<HasRunOnLoopT, LoopPassT>::value) {
using PassModelT =
detail::PassModel<Loop, LoopPassT, LoopAnalysisManager,
@@ -453,7 +449,7 @@ createFunctionToLoopPassAdaptor(LoopPassT &&Pass, bool UseMemorySSA = false,
return FunctionToLoopPassAdaptor(
std::unique_ptr<FunctionToLoopPassAdaptor::PassConceptT>(
new PassModelT(std::forward<LoopPassT>(Pass))),
- UseMemorySSA, UseBlockFrequencyInfo, false);
+ UseMemorySSA, false);
} else {
LoopPassManager LPM;
LPM.addPass(std::forward<LoopPassT>(Pass));
@@ -465,7 +461,7 @@ createFunctionToLoopPassAdaptor(LoopPassT &&Pass, bool UseMemorySSA = false,
return FunctionToLoopPassAdaptor(
std::unique_ptr<FunctionToLoopPassAdaptor::PassConceptT>(
new PassModelT(std::move(LPM))),
- UseMemorySSA, UseBlockFrequencyInfo, true);
+ UseMemorySSA, true);
}
}
@@ -474,8 +470,7 @@ createFunctionToLoopPassAdaptor(LoopPassT &&Pass, bool UseMemorySSA = false,
template <>
inline FunctionToLoopPassAdaptor
createFunctionToLoopPassAdaptor<LoopPassManager>(LoopPassManager &&LPM,
- bool UseMemorySSA,
- bool UseBlockFrequencyInfo) {
+ bool UseMemorySSA) {
// Check if LPM contains any loop pass and if it does not, returns an adaptor
// in loop-nest mode.
using PassModelT =
@@ -487,7 +482,7 @@ createFunctionToLoopPassAdaptor<LoopPassManager>(LoopPassManager &&LPM,
return FunctionToLoopPassAdaptor(
std::unique_ptr<FunctionToLoopPassAdaptor::PassConceptT>(
new PassModelT(std::move(LPM))),
- UseMemorySSA, UseBlockFrequencyInfo, LoopNestMode);
+ UseMemorySSA, LoopNestMode);
}
/// Pass for printing a loop's contents as textual IR.
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index 45c889c..a5ba197 100755
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -2177,16 +2177,13 @@ Constant *constantFoldVectorReduce(Intrinsic::ID IID, Constant *Op) {
return PoisonValue::get(VT->getElementType());
// TODO: Handle undef.
- if (!isa<ConstantVector>(Op) && !isa<ConstantDataVector>(Op))
- return nullptr;
-
- auto *EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(0U));
+ auto *EltC = dyn_cast_or_null<ConstantInt>(Op->getAggregateElement(0U));
if (!EltC)
return nullptr;
APInt Acc = EltC->getValue();
for (unsigned I = 1, E = VT->getNumElements(); I != E; I++) {
- if (!(EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(I))))
+ if (!(EltC = dyn_cast_or_null<ConstantInt>(Op->getAggregateElement(I))))
return nullptr;
const APInt &X = EltC->getValue();
switch (IID) {
@@ -3059,35 +3056,25 @@ static Constant *ConstantFoldScalarCall1(StringRef Name,
Val = Val | Val << 1;
return ConstantInt::get(Ty, Val);
}
-
- default:
- return nullptr;
}
}
- switch (IntrinsicID) {
- default: break;
- case Intrinsic::vector_reduce_add:
- case Intrinsic::vector_reduce_mul:
- case Intrinsic::vector_reduce_and:
- case Intrinsic::vector_reduce_or:
- case Intrinsic::vector_reduce_xor:
- case Intrinsic::vector_reduce_smin:
- case Intrinsic::vector_reduce_smax:
- case Intrinsic::vector_reduce_umin:
- case Intrinsic::vector_reduce_umax:
- if (Constant *C = constantFoldVectorReduce(IntrinsicID, Operands[0]))
- return C;
- break;
- }
-
- // Support ConstantVector in case we have an Undef in the top.
- if (isa<ConstantVector>(Operands[0]) ||
- isa<ConstantDataVector>(Operands[0]) ||
- isa<ConstantAggregateZero>(Operands[0])) {
+ if (Operands[0]->getType()->isVectorTy()) {
auto *Op = cast<Constant>(Operands[0]);
switch (IntrinsicID) {
default: break;
+ case Intrinsic::vector_reduce_add:
+ case Intrinsic::vector_reduce_mul:
+ case Intrinsic::vector_reduce_and:
+ case Intrinsic::vector_reduce_or:
+ case Intrinsic::vector_reduce_xor:
+ case Intrinsic::vector_reduce_smin:
+ case Intrinsic::vector_reduce_smax:
+ case Intrinsic::vector_reduce_umin:
+ case Intrinsic::vector_reduce_umax:
+ if (Constant *C = constantFoldVectorReduce(IntrinsicID, Operands[0]))
+ return C;
+ break;
case Intrinsic::x86_sse_cvtss2si:
case Intrinsic::x86_sse_cvtss2si64:
case Intrinsic::x86_sse2_cvtsd2si:
@@ -3116,10 +3103,15 @@ static Constant *ConstantFoldScalarCall1(StringRef Name,
case Intrinsic::wasm_alltrue:
// Check each element individually
unsigned E = cast<FixedVectorType>(Op->getType())->getNumElements();
- for (unsigned I = 0; I != E; ++I)
- if (Constant *Elt = Op->getAggregateElement(I))
- if (Elt->isZeroValue())
- return ConstantInt::get(Ty, 0);
+ for (unsigned I = 0; I != E; ++I) {
+ Constant *Elt = Op->getAggregateElement(I);
+ // Return false as soon as we find a non-true element.
+ if (Elt && Elt->isZeroValue())
+ return ConstantInt::get(Ty, 0);
+ // Bail as soon as we find an element we cannot prove to be true.
+ if (!Elt || !isa<ConstantInt>(Elt))
+ return nullptr;
+ }
return ConstantInt::get(Ty, 1);
}
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
index 0e5bc48..df75999 100644
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -947,9 +947,8 @@ LazyValueInfoImpl::solveBlockValueSelect(SelectInst *SI, BasicBlock *BB) {
/*UseBlockValue*/ false));
}
- ValueLatticeElement Result = TrueVal;
- Result.mergeIn(FalseVal);
- return Result;
+ TrueVal.mergeIn(FalseVal);
+ return TrueVal;
}
std::optional<ConstantRange>
@@ -1778,9 +1777,8 @@ ValueLatticeElement LazyValueInfoImpl::getValueInBlock(Value *V, BasicBlock *BB,
assert(OptResult && "Value not available after solving");
}
- ValueLatticeElement Result = *OptResult;
- LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
- return Result;
+ LLVM_DEBUG(dbgs() << " Result = " << *OptResult << "\n");
+ return *OptResult;
}
ValueLatticeElement LazyValueInfoImpl::getValueAt(Value *V, Instruction *CxtI) {
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 442b9d1..425420f 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -1840,19 +1840,19 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
// = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM
// = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>.
//
- if (SM->getNumOperands() == 2)
- if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0)))
- if (MulLHS->getAPInt().isPowerOf2())
- if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) {
- int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) -
- MulLHS->getAPInt().logBase2();
- Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits);
- return getMulExpr(
- getZeroExtendExpr(MulLHS, Ty),
- getZeroExtendExpr(
- getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty),
- SCEV::FlagNUW, Depth + 1);
- }
+ const APInt *C;
+ const SCEV *TruncRHS;
+ if (match(SM,
+ m_scev_Mul(m_scev_APInt(C), m_scev_Trunc(m_SCEV(TruncRHS)))) &&
+ C->isPowerOf2()) {
+ int NewTruncBits =
+ getTypeSizeInBits(SM->getOperand(1)->getType()) - C->logBase2();
+ Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits);
+ return getMulExpr(
+ getZeroExtendExpr(SM->getOperand(0), Ty),
+ getZeroExtendExpr(getTruncateExpr(TruncRHS, NewTruncTy), Ty),
+ SCEV::FlagNUW, Depth + 1);
+ }
}
// zext(umin(x, y)) -> umin(zext(x), zext(y))
@@ -3144,20 +3144,19 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
if (Ops.size() == 2) {
// C1*(C2+V) -> C1*C2 + C1*V
- if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
- // If any of Add's ops are Adds or Muls with a constant, apply this
- // transformation as well.
- //
- // TODO: There are some cases where this transformation is not
- // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of
- // this transformation should be narrowed down.
- if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add)) {
- const SCEV *LHS = getMulExpr(LHSC, Add->getOperand(0),
- SCEV::FlagAnyWrap, Depth + 1);
- const SCEV *RHS = getMulExpr(LHSC, Add->getOperand(1),
- SCEV::FlagAnyWrap, Depth + 1);
- return getAddExpr(LHS, RHS, SCEV::FlagAnyWrap, Depth + 1);
- }
+ // If any of Add's ops are Adds or Muls with a constant, apply this
+ // transformation as well.
+ //
+ // TODO: There are some cases where this transformation is not
+ // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of
+ // this transformation should be narrowed down.
+ const SCEV *Op0, *Op1;
+ if (match(Ops[1], m_scev_Add(m_SCEV(Op0), m_SCEV(Op1))) &&
+ containsConstantInAddMulChain(Ops[1])) {
+ const SCEV *LHS = getMulExpr(LHSC, Op0, SCEV::FlagAnyWrap, Depth + 1);
+ const SCEV *RHS = getMulExpr(LHSC, Op1, SCEV::FlagAnyWrap, Depth + 1);
+ return getAddExpr(LHS, RHS, SCEV::FlagAnyWrap, Depth + 1);
+ }
if (Ops[0]->isAllOnesValue()) {
// If we have a mul by -1 of an add, try distributing the -1 among the
@@ -3578,20 +3577,12 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
}
// ((-C + (C smax %x)) /u %x) evaluates to zero, for any positive constant C.
- if (const auto *AE = dyn_cast<SCEVAddExpr>(LHS);
- AE && AE->getNumOperands() == 2) {
- if (const auto *VC = dyn_cast<SCEVConstant>(AE->getOperand(0))) {
- const APInt &NegC = VC->getAPInt();
- if (NegC.isNegative() && !NegC.isMinSignedValue()) {
- const auto *MME = dyn_cast<SCEVSMaxExpr>(AE->getOperand(1));
- if (MME && MME->getNumOperands() == 2 &&
- isa<SCEVConstant>(MME->getOperand(0)) &&
- cast<SCEVConstant>(MME->getOperand(0))->getAPInt() == -NegC &&
- MME->getOperand(1) == RHS)
- return getZero(LHS->getType());
- }
- }
- }
+ const APInt *NegC, *C;
+ if (match(LHS,
+ m_scev_Add(m_scev_APInt(NegC),
+ m_scev_SMax(m_scev_APInt(C), m_scev_Specific(RHS)))) &&
+ NegC->isNegative() && !NegC->isMinSignedValue() && *C == -*NegC)
+ return getZero(LHS->getType());
// TODO: Generalize to handle any common factors.
// udiv (mul nuw a, vscale), (mul nuw b, vscale) --> udiv a, b
@@ -10791,19 +10782,15 @@ static bool HasSameValue(const SCEV *A, const SCEV *B) {
}
static bool MatchBinarySub(const SCEV *S, const SCEV *&LHS, const SCEV *&RHS) {
- const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S);
- if (!Add || Add->getNumOperands() != 2)
+ const SCEV *Op0, *Op1;
+ if (!match(S, m_scev_Add(m_SCEV(Op0), m_SCEV(Op1))))
return false;
- if (auto *ME = dyn_cast<SCEVMulExpr>(Add->getOperand(0));
- ME && ME->getNumOperands() == 2 && ME->getOperand(0)->isAllOnesValue()) {
- LHS = Add->getOperand(1);
- RHS = ME->getOperand(1);
+ if (match(Op0, m_scev_Mul(m_scev_AllOnes(), m_SCEV(RHS)))) {
+ LHS = Op1;
return true;
}
- if (auto *ME = dyn_cast<SCEVMulExpr>(Add->getOperand(1));
- ME && ME->getNumOperands() == 2 && ME->getOperand(0)->isAllOnesValue()) {
- LHS = Add->getOperand(0);
- RHS = ME->getOperand(1);
+ if (match(Op1, m_scev_Mul(m_scev_AllOnes(), m_SCEV(RHS)))) {
+ LHS = Op0;
return true;
}
return false;
@@ -12166,13 +12153,10 @@ bool ScalarEvolution::isImpliedCondBalancedTypes(
bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr,
const SCEV *&L, const SCEV *&R,
SCEV::NoWrapFlags &Flags) {
- const auto *AE = dyn_cast<SCEVAddExpr>(Expr);
- if (!AE || AE->getNumOperands() != 2)
+ if (!match(Expr, m_scev_Add(m_SCEV(L), m_SCEV(R))))
return false;
- L = AE->getOperand(0);
- R = AE->getOperand(1);
- Flags = AE->getNoWrapFlags();
+ Flags = cast<SCEVAddExpr>(Expr)->getNoWrapFlags();
return true;
}
@@ -15550,19 +15534,10 @@ void ScalarEvolution::LoopGuards::collectFromBlock(
auto IsMinMaxSCEVWithNonNegativeConstant =
[&](const SCEV *Expr, SCEVTypes &SCTy, const SCEV *&LHS,
const SCEV *&RHS) {
- if (auto *MinMax = dyn_cast<SCEVMinMaxExpr>(Expr)) {
- if (MinMax->getNumOperands() != 2)
- return false;
- if (auto *C = dyn_cast<SCEVConstant>(MinMax->getOperand(0))) {
- if (C->getAPInt().isNegative())
- return false;
- SCTy = MinMax->getSCEVType();
- LHS = MinMax->getOperand(0);
- RHS = MinMax->getOperand(1);
- return true;
- }
- }
- return false;
+ const APInt *C;
+ SCTy = Expr->getSCEVType();
+ return match(Expr, m_scev_MinMax(m_SCEV(LHS), m_SCEV(RHS))) &&
+ match(LHS, m_scev_APInt(C)) && C->isNonNegative();
};
// Return a new SCEV that modifies \p Expr to the closest number divides by
@@ -15765,19 +15740,26 @@ void ScalarEvolution::LoopGuards::collectFromBlock(
GetNextSCEVDividesByDivisor(One, DividesBy);
To = SE.getUMaxExpr(FromRewritten, OneAlignedUp);
} else {
+        // LHS != RHS can be rewritten as (LHS - RHS) = UMax(1, LHS - RHS),
+        // but creating the subtraction eagerly is expensive. Track the
+        // inequalities in a separate set, and materialize the rewrite lazily
+        // when encountering a suitable subtraction while rewriting.
if (LHS->getType()->isPointerTy()) {
LHS = SE.getLosslessPtrToIntExpr(LHS);
RHS = SE.getLosslessPtrToIntExpr(RHS);
if (isa<SCEVCouldNotCompute>(LHS) || isa<SCEVCouldNotCompute>(RHS))
break;
}
- auto AddSubRewrite = [&](const SCEV *A, const SCEV *B) {
- const SCEV *Sub = SE.getMinusSCEV(A, B);
- AddRewrite(Sub, Sub,
- SE.getUMaxExpr(Sub, SE.getOne(From->getType())));
- };
- AddSubRewrite(LHS, RHS);
- AddSubRewrite(RHS, LHS);
+ const SCEVConstant *C;
+ const SCEV *A, *B;
+ if (match(RHS, m_scev_Add(m_SCEVConstant(C), m_SCEV(A))) &&
+ match(LHS, m_scev_Add(m_scev_Specific(C), m_SCEV(B)))) {
+ RHS = A;
+ LHS = B;
+ }
+ if (LHS > RHS)
+ std::swap(LHS, RHS);
+ Guards.NotEqual.insert({LHS, RHS});
continue;
}
break;
@@ -15911,13 +15893,15 @@ const SCEV *ScalarEvolution::LoopGuards::rewrite(const SCEV *Expr) const {
class SCEVLoopGuardRewriter
: public SCEVRewriteVisitor<SCEVLoopGuardRewriter> {
const DenseMap<const SCEV *, const SCEV *> &Map;
+ const SmallDenseSet<std::pair<const SCEV *, const SCEV *>> &NotEqual;
SCEV::NoWrapFlags FlagMask = SCEV::FlagAnyWrap;
public:
SCEVLoopGuardRewriter(ScalarEvolution &SE,
const ScalarEvolution::LoopGuards &Guards)
- : SCEVRewriteVisitor(SE), Map(Guards.RewriteMap) {
+ : SCEVRewriteVisitor(SE), Map(Guards.RewriteMap),
+ NotEqual(Guards.NotEqual) {
if (Guards.PreserveNUW)
FlagMask = ScalarEvolution::setFlags(FlagMask, SCEV::FlagNUW);
if (Guards.PreserveNSW)
@@ -15972,14 +15956,36 @@ const SCEV *ScalarEvolution::LoopGuards::rewrite(const SCEV *Expr) const {
}
const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
+ // Helper to check if S is a subtraction (A - B) where A != B, and if so,
+ // return UMax(S, 1).
+ auto RewriteSubtraction = [&](const SCEV *S) -> const SCEV * {
+ const SCEV *LHS, *RHS;
+ if (MatchBinarySub(S, LHS, RHS)) {
+ if (LHS > RHS)
+ std::swap(LHS, RHS);
+ if (NotEqual.contains({LHS, RHS}))
+ return SE.getUMaxExpr(S, SE.getOne(S->getType()));
+ }
+ return nullptr;
+ };
+
+ // Check if Expr itself is a subtraction pattern with guard info.
+ if (const SCEV *Rewritten = RewriteSubtraction(Expr))
+ return Rewritten;
+
// Trip count expressions sometimes consist of adding 3 operands, i.e.
// (Const + A + B). There may be guard info for A + B, and if so, apply
// it.
// TODO: Could more generally apply guards to Add sub-expressions.
if (isa<SCEVConstant>(Expr->getOperand(0)) &&
Expr->getNumOperands() == 3) {
- if (const SCEV *S = Map.lookup(
- SE.getAddExpr(Expr->getOperand(1), Expr->getOperand(2))))
+ const SCEV *Add =
+ SE.getAddExpr(Expr->getOperand(1), Expr->getOperand(2));
+ if (const SCEV *Rewritten = RewriteSubtraction(Add))
+ return SE.getAddExpr(
+ Expr->getOperand(0), Rewritten,
+ ScalarEvolution::maskFlags(Expr->getNoWrapFlags(), FlagMask));
+ if (const SCEV *S = Map.lookup(Add))
return SE.getAddExpr(Expr->getOperand(0), S);
}
SmallVector<const SCEV *, 2> Operands;
@@ -16014,7 +16020,7 @@ const SCEV *ScalarEvolution::LoopGuards::rewrite(const SCEV *Expr) const {
}
};
- if (RewriteMap.empty())
+ if (RewriteMap.empty() && NotEqual.empty())
return Expr;
SCEVLoopGuardRewriter Rewriter(SE, *this);
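A worked illustration of the lazy inequality rewrite introduced above (N, M and C are made-up SCEVs, not from a test):

// Given a dominating guard `N != M`, collectFromBlock now records the
// canonical pair {N, M} in NotEqual instead of eagerly forming N - M.
// SCEVLoopGuardRewriter only materializes the bound when it actually sees
// the subtraction, which SCEV spells as an add of (-1 * M):
//   rewrite(N - M)        -->  umax(N - M, 1)
//   rewrite(C + (N - M))  -->  C + umax(N - M, 1)   // the 3-operand add case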
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index 433877f..567acf7 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -1039,12 +1039,17 @@ void DwarfDebug::finishUnitAttributes(const DICompileUnit *DIUnit,
} else
NewCU.addString(Die, dwarf::DW_AT_producer, Producer);
- if (auto Lang = DIUnit->getSourceLanguage(); Lang.hasVersionedName())
+ if (auto Lang = DIUnit->getSourceLanguage(); Lang.hasVersionedName()) {
NewCU.addUInt(Die, dwarf::DW_AT_language_name, dwarf::DW_FORM_data2,
Lang.getName());
- else
+
+ if (uint32_t LangVersion = Lang.getVersion(); LangVersion != 0)
+ NewCU.addUInt(Die, dwarf::DW_AT_language_version, /*Form=*/std::nullopt,
+ LangVersion);
+ } else {
NewCU.addUInt(Die, dwarf::DW_AT_language, dwarf::DW_FORM_data2,
Lang.getName());
+ }
NewCU.addString(Die, dwarf::DW_AT_name, FN);
StringRef SysRoot = DIUnit->getSysRoot();
@@ -2066,11 +2071,36 @@ void DwarfDebug::beginInstruction(const MachineInstr *MI) {
if (NoDebug)
return;
+ auto RecordLineZero = [&]() {
+ // Preserve the file and column numbers, if we can, to save space in
+ // the encoded line table.
+ // Do not update PrevInstLoc, it remembers the last non-0 line.
+ const MDNode *Scope = nullptr;
+ unsigned Column = 0;
+ if (PrevInstLoc) {
+ Scope = PrevInstLoc.getScope();
+ Column = PrevInstLoc.getCol();
+ }
+ recordSourceLine(/*Line=*/0, Column, Scope, /*Flags=*/0);
+ };
+
+ // When we emit a line-0 record, we don't update PrevInstLoc; so look at
+ // the last line number actually emitted, to see if it was line 0.
+ unsigned LastAsmLine =
+ Asm->OutStreamer->getContext().getCurrentDwarfLoc().getLine();
+
// Check if source location changes, but ignore DBG_VALUE and CFI locations.
// If the instruction is part of the function frame setup code, do not emit
// any line record, as there is no correspondence with any user code.
- if (MI->isMetaInstruction() || MI->getFlag(MachineInstr::FrameSetup))
+ if (MI->isMetaInstruction())
+ return;
+ if (MI->getFlag(MachineInstr::FrameSetup)) {
+ // Prevent a loc from the previous block leaking into frame setup instrs.
+ if (LastAsmLine && PrevInstBB && PrevInstBB != MI->getParent())
+ RecordLineZero();
return;
+ }
+
const DebugLoc &DL = MI->getDebugLoc();
unsigned Flags = 0;
@@ -2093,11 +2123,6 @@ void DwarfDebug::beginInstruction(const MachineInstr *MI) {
LocationString);
};
- // When we emit a line-0 record, we don't update PrevInstLoc; so look at
- // the last line number actually emitted, to see if it was line 0.
- unsigned LastAsmLine =
- Asm->OutStreamer->getContext().getCurrentDwarfLoc().getLine();
-
// There may be a mixture of scopes using and not using Key Instructions.
// Not-Key-Instructions functions inlined into Key Instructions functions
// should use not-key is_stmt handling. Key Instructions functions inlined
@@ -2163,18 +2188,8 @@ void DwarfDebug::beginInstruction(const MachineInstr *MI) {
// - Instruction is at the top of a block; we don't want to inherit the
// location from the physically previous (maybe unrelated) block.
if (UnknownLocations == Enable || PrevLabel ||
- (PrevInstBB && PrevInstBB != MI->getParent())) {
- // Preserve the file and column numbers, if we can, to save space in
- // the encoded line table.
- // Do not update PrevInstLoc, it remembers the last non-0 line.
- const MDNode *Scope = nullptr;
- unsigned Column = 0;
- if (PrevInstLoc) {
- Scope = PrevInstLoc.getScope();
- Column = PrevInstLoc.getCol();
- }
- recordSourceLine(/*Line=*/0, Column, Scope, /*Flags=*/0);
- }
+ (PrevInstBB && PrevInstBB != MI->getParent()))
+ RecordLineZero();
return;
}
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index f28b989..d8374b6 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -6041,8 +6041,7 @@ std::string llvm::UpgradeDataLayoutString(StringRef DL, StringRef TT) {
Triple T(TT);
// The only data layout upgrades needed for pre-GCN, SPIR or SPIRV are setting
// the address space of globals to 1. This does not apply to SPIRV Logical.
- if (((T.isAMDGPU() && !T.isAMDGCN()) ||
- (T.isSPIR() || (T.isSPIRV() && !T.isSPIRVLogical()))) &&
+ if ((T.isSPIR() || (T.isSPIRV() && !T.isSPIRVLogical())) &&
!DL.contains("-G") && !DL.starts_with("G")) {
return DL.empty() ? std::string("G1") : (DL + "-G1").str();
}
@@ -6055,35 +6054,43 @@ std::string llvm::UpgradeDataLayoutString(StringRef DL, StringRef TT) {
return DL.str();
}
+ // AMDGPU data layout upgrades.
std::string Res = DL.str();
- // AMDGCN data layout upgrades.
- if (T.isAMDGCN()) {
+ if (T.isAMDGPU()) {
// Define address spaces for constants.
if (!DL.contains("-G") && !DL.starts_with("G"))
Res.append(Res.empty() ? "G1" : "-G1");
- // Add missing non-integral declarations.
- // This goes before adding new address spaces to prevent incoherent string
- // values.
- if (!DL.contains("-ni") && !DL.starts_with("ni"))
- Res.append("-ni:7:8:9");
- // Update ni:7 to ni:7:8:9.
- if (DL.ends_with("ni:7"))
- Res.append(":8:9");
- if (DL.ends_with("ni:7:8"))
- Res.append(":9");
-
- // Add sizing for address spaces 7 and 8 (fat raw buffers and buffer
- // resources) An empty data layout has already been upgraded to G1 by now.
- if (!DL.contains("-p7") && !DL.starts_with("p7"))
- Res.append("-p7:160:256:256:32");
- if (!DL.contains("-p8") && !DL.starts_with("p8"))
- Res.append("-p8:128:128:128:48");
- constexpr StringRef OldP8("-p8:128:128-");
- if (DL.contains(OldP8))
- Res.replace(Res.find(OldP8), OldP8.size(), "-p8:128:128:128:48-");
- if (!DL.contains("-p9") && !DL.starts_with("p9"))
- Res.append("-p9:192:256:256:32");
+ // AMDGCN data layout upgrades.
+ if (T.isAMDGCN()) {
+
+ // Add missing non-integral declarations.
+ // This goes before adding new address spaces to prevent incoherent string
+ // values.
+ if (!DL.contains("-ni") && !DL.starts_with("ni"))
+ Res.append("-ni:7:8:9");
+ // Update ni:7 to ni:7:8:9.
+ if (DL.ends_with("ni:7"))
+ Res.append(":8:9");
+ if (DL.ends_with("ni:7:8"))
+ Res.append(":9");
+
+ // Add sizing for address spaces 7 and 8 (fat raw buffers and buffer
+      // resources). An empty data layout has already been upgraded to G1 by now.
+ if (!DL.contains("-p7") && !DL.starts_with("p7"))
+ Res.append("-p7:160:256:256:32");
+ if (!DL.contains("-p8") && !DL.starts_with("p8"))
+ Res.append("-p8:128:128:128:48");
+ constexpr StringRef OldP8("-p8:128:128-");
+ if (DL.contains(OldP8))
+ Res.replace(Res.find(OldP8), OldP8.size(), "-p8:128:128:128:48-");
+ if (!DL.contains("-p9") && !DL.starts_with("p9"))
+ Res.append("-p9:192:256:256:32");
+ }
+
+ // Upgrade the ELF mangling mode.
+ if (!DL.contains("m:e"))
+ Res = Res.empty() ? "m:e" : "m:e-" + Res;
return Res;
}
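For illustration, two hand-computed examples of what the restructured AMDGPU path now produces (the input strings are hypothetical, so treat this as a sketch rather than test output):

//   r600 triple,   DL = "e-p:32:32"  ->  "m:e-e-p:32:32-G1"
//   amdgcn triple, DL = ""           ->  "m:e-G1-ni:7:8:9-p7:160:256:256:32"
//                                        "-p8:128:128:128:48-p9:192:256:256:32"
// That is: G1 is added for every AMDGPU target, the ni/p7/p8/p9 pieces stay
// AMDGCN-only, and the ELF mangling mode is prepended whenever it is missing.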
diff --git a/llvm/lib/IR/DebugInfo.cpp b/llvm/lib/IR/DebugInfo.cpp
index 9601a8a..5883606 100644
--- a/llvm/lib/IR/DebugInfo.cpp
+++ b/llvm/lib/IR/DebugInfo.cpp
@@ -294,9 +294,9 @@ void DebugInfoFinder::processSubprogram(DISubprogram *SP) {
// just DISubprogram's, referenced from anywhere within the Function being
// cloned prior to calling MapMetadata / RemapInstruction to avoid their
// duplication later as DICompileUnit's are also directly referenced by
- // llvm.dbg.cu list. Thefore we need to collect DICompileUnit's here as well.
- // Also, DICompileUnit's may reference DISubprogram's too and therefore need
- // to be at least looked through.
+ // llvm.dbg.cu list. Therefore we need to collect DICompileUnit's here as
+ // well. Also, DICompileUnit's may reference DISubprogram's too and therefore
+ // need to be at least looked through.
processCompileUnit(SP->getUnit());
processType(SP->getType());
for (auto *Element : SP->getTemplateParams()) {
@@ -377,7 +377,7 @@ bool DebugInfoFinder::addScope(DIScope *Scope) {
/// Recursively handle DILocations in followup metadata etc.
///
-/// TODO: If for example a followup loop metadata would refence itself this
+/// TODO: If for example a followup loop metadata would reference itself this
/// function would go into infinite recursion. We do not expect such cycles in
/// the loop metadata (except for the self-referencing first element
/// "LoopID"). However, we could at least handle such situations more gracefully
@@ -679,7 +679,7 @@ private:
auto Variables = nullptr;
auto TemplateParams = nullptr;
- // Make a distinct DISubprogram, for situations that warrent it.
+ // Make a distinct DISubprogram, for situations that warrant it.
auto distinctMDSubprogram = [&]() {
return DISubprogram::getDistinct(
MDS->getContext(), FileAndScope, MDS->getName(), LinkageName,
@@ -1095,6 +1095,35 @@ LLVMDIBuilderCreateFile(LLVMDIBuilderRef Builder, const char *Filename,
StringRef(Directory, DirectoryLen)));
}
+static llvm::DIFile::ChecksumKind
+map_from_llvmChecksumKind(LLVMChecksumKind CSKind) {
+ switch (CSKind) {
+ case LLVMChecksumKind::CSK_MD5:
+ return llvm::DIFile::CSK_MD5;
+ case LLVMChecksumKind::CSK_SHA1:
+ return llvm::DIFile::CSK_SHA1;
+ case LLVMChecksumKind::CSK_SHA256:
+ return llvm::DIFile::CSK_SHA256;
+ }
+ llvm_unreachable("Unhandled Checksum Kind");
+}
+
+LLVMMetadataRef LLVMDIBuilderCreateFileWithChecksum(
+ LLVMDIBuilderRef Builder, const char *Filename, size_t FilenameLen,
+ const char *Directory, size_t DirectoryLen, LLVMChecksumKind ChecksumKind,
+ const char *Checksum, size_t ChecksumLen, const char *Source,
+ size_t SourceLen) {
+ StringRef ChkSum = StringRef(Checksum, ChecksumLen);
+ auto CSK = map_from_llvmChecksumKind(ChecksumKind);
+ llvm::DIFile::ChecksumInfo<StringRef> CSInfo(CSK, ChkSum);
+ std::optional<StringRef> Src;
+ if (SourceLen > 0)
+ Src = StringRef(Source, SourceLen);
+ return wrap(unwrap(Builder)->createFile(StringRef(Filename, FilenameLen),
+ StringRef(Directory, DirectoryLen),
+ CSInfo, Src));
+}
+
LLVMMetadataRef
LLVMDIBuilderCreateModule(LLVMDIBuilderRef Builder, LLVMMetadataRef ParentScope,
const char *Name, size_t NameLen,
@@ -2014,7 +2043,7 @@ void at::remapAssignID(DenseMap<DIAssignID *, DIAssignID *> &Map,
I.setMetadata(LLVMContext::MD_DIAssignID, GetNewID(ID));
}
-/// Collect constant properies (base, size, offset) of \p StoreDest.
+/// Collect constant properties (base, size, offset) of \p StoreDest.
/// Return std::nullopt if any properties are not constants or the
/// offset from the base pointer is negative.
static std::optional<AssignmentInfo>
@@ -2300,7 +2329,7 @@ PreservedAnalyses AssignmentTrackingPass::run(Function &F,
return PreservedAnalyses::all();
// Record that this module uses assignment tracking. It doesn't matter that
- // some functons in the module may not use it - the debug info in those
+ // some functions in the module may not use it - the debug info in those
// functions will still be handled properly.
setAssignmentTrackingModuleFlag(*F.getParent());
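A hedged usage sketch of the new LLVMDIBuilderCreateFileWithChecksum entry point defined above; it assumes the matching declaration and LLVMChecksumKind live in llvm-c/DebugInfo.h, that DIB is an existing builder, and that the checksum string is a placeholder.

#include "llvm-c/DebugInfo.h"
#include <cstring>

LLVMMetadataRef emitFileNode(LLVMDIBuilderRef DIB) {
  const char *Sum = "d41d8cd98f00b204e9800998ecf8427e"; // placeholder MD5
  return LLVMDIBuilderCreateFileWithChecksum(
      DIB, "a.c", 3, "/tmp", 4, LLVMChecksumKind::CSK_MD5, Sum, strlen(Sum),
      /*Source=*/nullptr, /*SourceLen=*/0);
}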
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index c79a950..3572852 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -6479,9 +6479,12 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
NumRows->getZExtValue() * NumColumns->getZExtValue(),
"Result of a matrix operation does not fit in the returned vector!");
- if (Stride)
+ if (Stride) {
+ Check(Stride->getBitWidth() <= 64, "Stride bitwidth cannot exceed 64!",
+ IF);
Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
"Stride must be greater or equal than the number of rows!", IF);
+ }
break;
}
diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp
index 53cf004..e45cac8 100644
--- a/llvm/lib/Passes/PassBuilder.cpp
+++ b/llvm/lib/Passes/PassBuilder.cpp
@@ -2027,13 +2027,13 @@ Error PassBuilder::parseModulePass(ModulePassManager &MPM,
#define LOOPNEST_PASS(NAME, CREATE_PASS) \
if (Name == NAME) { \
MPM.addPass(createModuleToFunctionPassAdaptor( \
- createFunctionToLoopPassAdaptor(CREATE_PASS, false, false))); \
+ createFunctionToLoopPassAdaptor(CREATE_PASS, false))); \
return Error::success(); \
}
#define LOOP_PASS(NAME, CREATE_PASS) \
if (Name == NAME) { \
MPM.addPass(createModuleToFunctionPassAdaptor( \
- createFunctionToLoopPassAdaptor(CREATE_PASS, false, false))); \
+ createFunctionToLoopPassAdaptor(CREATE_PASS, false))); \
return Error::success(); \
}
#define LOOP_PASS_WITH_PARAMS(NAME, CLASS, CREATE_PASS, PARSER, PARAMS) \
@@ -2041,9 +2041,8 @@ Error PassBuilder::parseModulePass(ModulePassManager &MPM,
auto Params = parsePassParameters(PARSER, Name, NAME); \
if (!Params) \
return Params.takeError(); \
- MPM.addPass( \
- createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor( \
- CREATE_PASS(Params.get()), false, false))); \
+ MPM.addPass(createModuleToFunctionPassAdaptor( \
+ createFunctionToLoopPassAdaptor(CREATE_PASS(Params.get()), false))); \
return Error::success(); \
}
#include "PassRegistry.def"
@@ -2142,13 +2141,13 @@ Error PassBuilder::parseCGSCCPass(CGSCCPassManager &CGPM,
#define LOOPNEST_PASS(NAME, CREATE_PASS) \
if (Name == NAME) { \
CGPM.addPass(createCGSCCToFunctionPassAdaptor( \
- createFunctionToLoopPassAdaptor(CREATE_PASS, false, false))); \
+ createFunctionToLoopPassAdaptor(CREATE_PASS, false))); \
return Error::success(); \
}
#define LOOP_PASS(NAME, CREATE_PASS) \
if (Name == NAME) { \
CGPM.addPass(createCGSCCToFunctionPassAdaptor( \
- createFunctionToLoopPassAdaptor(CREATE_PASS, false, false))); \
+ createFunctionToLoopPassAdaptor(CREATE_PASS, false))); \
return Error::success(); \
}
#define LOOP_PASS_WITH_PARAMS(NAME, CLASS, CREATE_PASS, PARSER, PARAMS) \
@@ -2156,9 +2155,8 @@ Error PassBuilder::parseCGSCCPass(CGSCCPassManager &CGPM,
auto Params = parsePassParameters(PARSER, Name, NAME); \
if (!Params) \
return Params.takeError(); \
- CGPM.addPass( \
- createCGSCCToFunctionPassAdaptor(createFunctionToLoopPassAdaptor( \
- CREATE_PASS(Params.get()), false, false))); \
+ CGPM.addPass(createCGSCCToFunctionPassAdaptor( \
+ createFunctionToLoopPassAdaptor(CREATE_PASS(Params.get()), false))); \
return Error::success(); \
}
#include "PassRegistry.def"
@@ -2191,11 +2189,8 @@ Error PassBuilder::parseFunctionPass(FunctionPassManager &FPM,
return Err;
// Add the nested pass manager with the appropriate adaptor.
bool UseMemorySSA = (Name == "loop-mssa");
- bool UseBFI = llvm::any_of(InnerPipeline, [](auto Pipeline) {
- return Pipeline.Name.contains("simple-loop-unswitch");
- });
- FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM), UseMemorySSA,
- UseBFI));
+ FPM.addPass(
+ createFunctionToLoopPassAdaptor(std::move(LPM), UseMemorySSA));
return Error::success();
}
if (Name == "machine-function") {
@@ -2248,12 +2243,12 @@ Error PassBuilder::parseFunctionPass(FunctionPassManager &FPM,
// The risk is that it may become obsolete if we're not careful.
#define LOOPNEST_PASS(NAME, CREATE_PASS) \
if (Name == NAME) { \
- FPM.addPass(createFunctionToLoopPassAdaptor(CREATE_PASS, false, false)); \
+ FPM.addPass(createFunctionToLoopPassAdaptor(CREATE_PASS, false)); \
return Error::success(); \
}
#define LOOP_PASS(NAME, CREATE_PASS) \
if (Name == NAME) { \
- FPM.addPass(createFunctionToLoopPassAdaptor(CREATE_PASS, false, false)); \
+ FPM.addPass(createFunctionToLoopPassAdaptor(CREATE_PASS, false)); \
return Error::success(); \
}
#define LOOP_PASS_WITH_PARAMS(NAME, CLASS, CREATE_PASS, PARSER, PARAMS) \
@@ -2261,8 +2256,8 @@ Error PassBuilder::parseFunctionPass(FunctionPassManager &FPM,
auto Params = parsePassParameters(PARSER, Name, NAME); \
if (!Params) \
return Params.takeError(); \
- FPM.addPass(createFunctionToLoopPassAdaptor(CREATE_PASS(Params.get()), \
- false, false)); \
+ FPM.addPass( \
+ createFunctionToLoopPassAdaptor(CREATE_PASS(Params.get()), false)); \
return Error::success(); \
}
#include "PassRegistry.def"
diff --git a/llvm/lib/Passes/PassBuilderPipelines.cpp b/llvm/lib/Passes/PassBuilderPipelines.cpp
index 3f3939eaf..bd03ac0 100644
--- a/llvm/lib/Passes/PassBuilderPipelines.cpp
+++ b/llvm/lib/Passes/PassBuilderPipelines.cpp
@@ -519,16 +519,14 @@ PassBuilder::buildO1FunctionSimplificationPipeline(OptimizationLevel Level,
invokeLoopOptimizerEndEPCallbacks(LPM2, Level);
FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM1),
- /*UseMemorySSA=*/true,
- /*UseBlockFrequencyInfo=*/true));
+ /*UseMemorySSA=*/true));
FPM.addPass(
SimplifyCFGPass(SimplifyCFGOptions().convertSwitchRangeToICmp(true)));
FPM.addPass(InstCombinePass());
// The loop passes in LPM2 (LoopFullUnrollPass) do not preserve MemorySSA.
// *All* loop passes must preserve it, in order to be able to use it.
FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM2),
- /*UseMemorySSA=*/false,
- /*UseBlockFrequencyInfo=*/false));
+ /*UseMemorySSA=*/false));
// Delete small array after loop unroll.
FPM.addPass(SROAPass(SROAOptions::ModifyCFG));
@@ -710,8 +708,7 @@ PassBuilder::buildFunctionSimplificationPipeline(OptimizationLevel Level,
invokeLoopOptimizerEndEPCallbacks(LPM2, Level);
FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM1),
- /*UseMemorySSA=*/true,
- /*UseBlockFrequencyInfo=*/true));
+ /*UseMemorySSA=*/true));
FPM.addPass(
SimplifyCFGPass(SimplifyCFGOptions().convertSwitchRangeToICmp(true)));
FPM.addPass(InstCombinePass());
@@ -719,8 +716,7 @@ PassBuilder::buildFunctionSimplificationPipeline(OptimizationLevel Level,
// LoopDeletionPass and LoopFullUnrollPass) do not preserve MemorySSA.
// *All* loop passes must preserve it, in order to be able to use it.
FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM2),
- /*UseMemorySSA=*/false,
- /*UseBlockFrequencyInfo=*/false));
+ /*UseMemorySSA=*/false));
// Delete small array after loop unroll.
FPM.addPass(SROAPass(SROAOptions::ModifyCFG));
@@ -773,7 +769,7 @@ PassBuilder::buildFunctionSimplificationPipeline(OptimizationLevel Level,
FPM.addPass(createFunctionToLoopPassAdaptor(
LICMPass(PTO.LicmMssaOptCap, PTO.LicmMssaNoAccForPromotionCap,
/*AllowSpeculation=*/true),
- /*UseMemorySSA=*/true, /*UseBlockFrequencyInfo=*/false));
+ /*UseMemorySSA=*/true));
FPM.addPass(CoroElidePass());
@@ -842,8 +838,7 @@ void PassBuilder::addPostPGOLoopRotation(ModulePassManager &MPM,
createFunctionToLoopPassAdaptor(
LoopRotatePass(EnableLoopHeaderDuplication ||
Level != OptimizationLevel::Oz),
- /*UseMemorySSA=*/false,
- /*UseBlockFrequencyInfo=*/false),
+ /*UseMemorySSA=*/false),
PTO.EagerlyInvalidateAnalyses));
}
}
@@ -1358,8 +1353,7 @@ void PassBuilder::addVectorPasses(OptimizationLevel Level,
LPM.addPass(SimpleLoopUnswitchPass(/* NonTrivial */ Level ==
OptimizationLevel::O3));
ExtraPasses.addPass(
- createFunctionToLoopPassAdaptor(std::move(LPM), /*UseMemorySSA=*/true,
- /*UseBlockFrequencyInfo=*/true));
+ createFunctionToLoopPassAdaptor(std::move(LPM), /*UseMemorySSA=*/true));
ExtraPasses.addPass(
SimplifyCFGPass(SimplifyCFGOptions().convertSwitchRangeToICmp(true)));
ExtraPasses.addPass(InstCombinePass());
@@ -1438,7 +1432,7 @@ void PassBuilder::addVectorPasses(OptimizationLevel Level,
FPM.addPass(createFunctionToLoopPassAdaptor(
LICMPass(PTO.LicmMssaOptCap, PTO.LicmMssaNoAccForPromotionCap,
/*AllowSpeculation=*/true),
- /*UseMemorySSA=*/true, /*UseBlockFrequencyInfo=*/false));
+ /*UseMemorySSA=*/true));
// Now that we've vectorized and unrolled loops, we may have more refined
// alignment information, try to re-derive it here.
@@ -1520,7 +1514,7 @@ PassBuilder::buildModuleOptimizationPipeline(OptimizationLevel Level,
OptimizePM.addPass(createFunctionToLoopPassAdaptor(
LICMPass(PTO.LicmMssaOptCap, PTO.LicmMssaNoAccForPromotionCap,
/*AllowSpeculation=*/true),
- /*USeMemorySSA=*/true, /*UseBlockFrequencyInfo=*/false));
+      /*UseMemorySSA=*/true));
}
OptimizePM.addPass(Float2IntPass());
@@ -1560,8 +1554,8 @@ PassBuilder::buildModuleOptimizationPipeline(OptimizationLevel Level,
if (PTO.LoopInterchange)
LPM.addPass(LoopInterchangePass());
- OptimizePM.addPass(createFunctionToLoopPassAdaptor(
- std::move(LPM), /*UseMemorySSA=*/false, /*UseBlockFrequencyInfo=*/false));
+ OptimizePM.addPass(
+ createFunctionToLoopPassAdaptor(std::move(LPM), /*UseMemorySSA=*/false));
// FIXME: This may not be the right place in the pipeline.
// We need to have the data to support the right place.
@@ -2133,7 +2127,7 @@ PassBuilder::buildLTODefaultPipeline(OptimizationLevel Level,
MainFPM.addPass(createFunctionToLoopPassAdaptor(
LICMPass(PTO.LicmMssaOptCap, PTO.LicmMssaNoAccForPromotionCap,
/*AllowSpeculation=*/true),
- /*USeMemorySSA=*/true, /*UseBlockFrequencyInfo=*/false));
+      /*UseMemorySSA=*/true));
if (RunNewGVN)
MainFPM.addPass(NewGVNPass());
@@ -2163,8 +2157,8 @@ PassBuilder::buildLTODefaultPipeline(OptimizationLevel Level,
PTO.ForgetAllSCEVInLoopUnroll));
// The loop passes in LPM (LoopFullUnrollPass) do not preserve MemorySSA.
// *All* loop passes must preserve it, in order to be able to use it.
- MainFPM.addPass(createFunctionToLoopPassAdaptor(
- std::move(LPM), /*UseMemorySSA=*/false, /*UseBlockFrequencyInfo=*/true));
+ MainFPM.addPass(
+ createFunctionToLoopPassAdaptor(std::move(LPM), /*UseMemorySSA=*/false));
MainFPM.addPass(LoopDistributePass());
diff --git a/llvm/lib/Remarks/BitstreamRemarkParser.h b/llvm/lib/Remarks/BitstreamRemarkParser.h
index 4f66c47..914edd8 100644
--- a/llvm/lib/Remarks/BitstreamRemarkParser.h
+++ b/llvm/lib/Remarks/BitstreamRemarkParser.h
@@ -112,7 +112,7 @@ public:
/// Helper to parse a META_BLOCK for a bitstream remark container.
class BitstreamMetaParserHelper
: public BitstreamBlockParserHelper<BitstreamMetaParserHelper> {
- friend class BitstreamBlockParserHelper;
+ friend class BitstreamBlockParserHelper<BitstreamMetaParserHelper>;
public:
struct ContainerInfo {
@@ -137,7 +137,7 @@ protected:
/// Helper to parse a REMARK_BLOCK for a bitstream remark container.
class BitstreamRemarkParserHelper
: public BitstreamBlockParserHelper<BitstreamRemarkParserHelper> {
- friend class BitstreamBlockParserHelper;
+ friend class BitstreamBlockParserHelper<BitstreamRemarkParserHelper>;
protected:
SmallVector<uint64_t, 5> Record;
diff --git a/llvm/lib/Support/APFloat.cpp b/llvm/lib/Support/APFloat.cpp
index 8623c06..b4de79a 100644
--- a/llvm/lib/Support/APFloat.cpp
+++ b/llvm/lib/Support/APFloat.cpp
@@ -130,44 +130,46 @@ struct fltSemantics {
bool hasSignBitInMSB = true;
};
-static constexpr fltSemantics semIEEEhalf = {15, -14, 11, 16};
-static constexpr fltSemantics semBFloat = {127, -126, 8, 16};
-static constexpr fltSemantics semIEEEsingle = {127, -126, 24, 32};
-static constexpr fltSemantics semIEEEdouble = {1023, -1022, 53, 64};
-static constexpr fltSemantics semIEEEquad = {16383, -16382, 113, 128};
-static constexpr fltSemantics semFloat8E5M2 = {15, -14, 3, 8};
-static constexpr fltSemantics semFloat8E5M2FNUZ = {
+constexpr fltSemantics APFloatBase::semIEEEhalf = {15, -14, 11, 16};
+constexpr fltSemantics APFloatBase::semBFloat = {127, -126, 8, 16};
+constexpr fltSemantics APFloatBase::semIEEEsingle = {127, -126, 24, 32};
+constexpr fltSemantics APFloatBase::semIEEEdouble = {1023, -1022, 53, 64};
+constexpr fltSemantics APFloatBase::semIEEEquad = {16383, -16382, 113, 128};
+constexpr fltSemantics APFloatBase::semFloat8E5M2 = {15, -14, 3, 8};
+constexpr fltSemantics APFloatBase::semFloat8E5M2FNUZ = {
15, -15, 3, 8, fltNonfiniteBehavior::NanOnly, fltNanEncoding::NegativeZero};
-static constexpr fltSemantics semFloat8E4M3 = {7, -6, 4, 8};
-static constexpr fltSemantics semFloat8E4M3FN = {
+constexpr fltSemantics APFloatBase::semFloat8E4M3 = {7, -6, 4, 8};
+constexpr fltSemantics APFloatBase::semFloat8E4M3FN = {
8, -6, 4, 8, fltNonfiniteBehavior::NanOnly, fltNanEncoding::AllOnes};
-static constexpr fltSemantics semFloat8E4M3FNUZ = {
+constexpr fltSemantics APFloatBase::semFloat8E4M3FNUZ = {
7, -7, 4, 8, fltNonfiniteBehavior::NanOnly, fltNanEncoding::NegativeZero};
-static constexpr fltSemantics semFloat8E4M3B11FNUZ = {
+constexpr fltSemantics APFloatBase::semFloat8E4M3B11FNUZ = {
4, -10, 4, 8, fltNonfiniteBehavior::NanOnly, fltNanEncoding::NegativeZero};
-static constexpr fltSemantics semFloat8E3M4 = {3, -2, 5, 8};
-static constexpr fltSemantics semFloatTF32 = {127, -126, 11, 19};
-static constexpr fltSemantics semFloat8E8M0FNU = {127,
- -127,
- 1,
- 8,
- fltNonfiniteBehavior::NanOnly,
- fltNanEncoding::AllOnes,
- false,
- false,
- false};
-
-static constexpr fltSemantics semFloat6E3M2FN = {
+constexpr fltSemantics APFloatBase::semFloat8E3M4 = {3, -2, 5, 8};
+constexpr fltSemantics APFloatBase::semFloatTF32 = {127, -126, 11, 19};
+constexpr fltSemantics APFloatBase::semFloat8E8M0FNU = {
+ 127,
+ -127,
+ 1,
+ 8,
+ fltNonfiniteBehavior::NanOnly,
+ fltNanEncoding::AllOnes,
+ false,
+ false,
+ false};
+
+constexpr fltSemantics APFloatBase::semFloat6E3M2FN = {
4, -2, 3, 6, fltNonfiniteBehavior::FiniteOnly};
-static constexpr fltSemantics semFloat6E2M3FN = {
+constexpr fltSemantics APFloatBase::semFloat6E2M3FN = {
2, 0, 4, 6, fltNonfiniteBehavior::FiniteOnly};
-static constexpr fltSemantics semFloat4E2M1FN = {
+constexpr fltSemantics APFloatBase::semFloat4E2M1FN = {
2, 0, 2, 4, fltNonfiniteBehavior::FiniteOnly};
-static constexpr fltSemantics semX87DoubleExtended = {16383, -16382, 64, 80};
-static constexpr fltSemantics semBogus = {0, 0, 0, 0};
-static constexpr fltSemantics semPPCDoubleDouble = {-1, 0, 0, 128};
-static constexpr fltSemantics semPPCDoubleDoubleLegacy = {1023, -1022 + 53,
- 53 + 53, 128};
+constexpr fltSemantics APFloatBase::semX87DoubleExtended = {16383, -16382, 64,
+ 80};
+constexpr fltSemantics APFloatBase::semBogus = {0, 0, 0, 0};
+constexpr fltSemantics APFloatBase::semPPCDoubleDouble = {-1, 0, 0, 128};
+constexpr fltSemantics APFloatBase::semPPCDoubleDoubleLegacy = {
+ 1023, -1022 + 53, 53 + 53, 128};
const llvm::fltSemantics &APFloatBase::EnumToSemantics(Semantics S) {
switch (S) {
@@ -261,36 +263,6 @@ APFloatBase::SemanticsToEnum(const llvm::fltSemantics &Sem) {
llvm_unreachable("Unknown floating semantics");
}
-const fltSemantics &APFloatBase::IEEEhalf() { return semIEEEhalf; }
-const fltSemantics &APFloatBase::BFloat() { return semBFloat; }
-const fltSemantics &APFloatBase::IEEEsingle() { return semIEEEsingle; }
-const fltSemantics &APFloatBase::IEEEdouble() { return semIEEEdouble; }
-const fltSemantics &APFloatBase::IEEEquad() { return semIEEEquad; }
-const fltSemantics &APFloatBase::PPCDoubleDouble() {
- return semPPCDoubleDouble;
-}
-const fltSemantics &APFloatBase::PPCDoubleDoubleLegacy() {
- return semPPCDoubleDoubleLegacy;
-}
-const fltSemantics &APFloatBase::Float8E5M2() { return semFloat8E5M2; }
-const fltSemantics &APFloatBase::Float8E5M2FNUZ() { return semFloat8E5M2FNUZ; }
-const fltSemantics &APFloatBase::Float8E4M3() { return semFloat8E4M3; }
-const fltSemantics &APFloatBase::Float8E4M3FN() { return semFloat8E4M3FN; }
-const fltSemantics &APFloatBase::Float8E4M3FNUZ() { return semFloat8E4M3FNUZ; }
-const fltSemantics &APFloatBase::Float8E4M3B11FNUZ() {
- return semFloat8E4M3B11FNUZ;
-}
-const fltSemantics &APFloatBase::Float8E3M4() { return semFloat8E3M4; }
-const fltSemantics &APFloatBase::FloatTF32() { return semFloatTF32; }
-const fltSemantics &APFloatBase::Float8E8M0FNU() { return semFloat8E8M0FNU; }
-const fltSemantics &APFloatBase::Float6E3M2FN() { return semFloat6E3M2FN; }
-const fltSemantics &APFloatBase::Float6E2M3FN() { return semFloat6E2M3FN; }
-const fltSemantics &APFloatBase::Float4E2M1FN() { return semFloat4E2M1FN; }
-const fltSemantics &APFloatBase::x87DoubleExtended() {
- return semX87DoubleExtended;
-}
-const fltSemantics &APFloatBase::Bogus() { return semBogus; }
-
bool APFloatBase::isRepresentableBy(const fltSemantics &A,
const fltSemantics &B) {
return A.maxExponent <= B.maxExponent && A.minExponent >= B.minExponent &&
@@ -1029,7 +1001,7 @@ void IEEEFloat::makeNaN(bool SNaN, bool Negative, const APInt *fill) {
// For x87 extended precision, we want to make a NaN, not a
// pseudo-NaN. Maybe we should expose the ability to make
// pseudo-NaNs?
- if (semantics == &semX87DoubleExtended)
+ if (semantics == &APFloatBase::semX87DoubleExtended)
APInt::tcSetBit(significand, QNaNBit + 1);
}
@@ -1054,7 +1026,7 @@ IEEEFloat &IEEEFloat::operator=(IEEEFloat &&rhs) {
category = rhs.category;
sign = rhs.sign;
- rhs.semantics = &semBogus;
+ rhs.semantics = &APFloatBase::semBogus;
return *this;
}
@@ -1247,7 +1219,7 @@ IEEEFloat::IEEEFloat(const IEEEFloat &rhs) {
assign(rhs);
}
-IEEEFloat::IEEEFloat(IEEEFloat &&rhs) : semantics(&semBogus) {
+IEEEFloat::IEEEFloat(IEEEFloat &&rhs) : semantics(&APFloatBase::semBogus) {
*this = std::move(rhs);
}
@@ -2607,8 +2579,8 @@ APFloat::opStatus IEEEFloat::convert(const fltSemantics &toSemantics,
shift = toSemantics.precision - fromSemantics.precision;
bool X86SpecialNan = false;
- if (&fromSemantics == &semX87DoubleExtended &&
- &toSemantics != &semX87DoubleExtended && category == fcNaN &&
+ if (&fromSemantics == &APFloatBase::semX87DoubleExtended &&
+ &toSemantics != &APFloatBase::semX87DoubleExtended && category == fcNaN &&
(!(*significandParts() & 0x8000000000000000ULL) ||
!(*significandParts() & 0x4000000000000000ULL))) {
// x86 has some unusual NaNs which cannot be represented in any other
@@ -2694,7 +2666,7 @@ APFloat::opStatus IEEEFloat::convert(const fltSemantics &toSemantics,
// For x87 extended precision, we want to make a NaN, not a special NaN if
// the input wasn't special either.
- if (!X86SpecialNan && semantics == &semX87DoubleExtended)
+ if (!X86SpecialNan && semantics == &APFloatBase::semX87DoubleExtended)
APInt::tcSetBit(significandParts(), semantics->precision - 1);
// Convert of sNaN creates qNaN and raises an exception (invalid op).
@@ -3530,7 +3502,8 @@ hash_code hash_value(const IEEEFloat &Arg) {
// the actual IEEE respresentations. We compensate for that here.
APInt IEEEFloat::convertF80LongDoubleAPFloatToAPInt() const {
- assert(semantics == (const llvm::fltSemantics*)&semX87DoubleExtended);
+ assert(semantics ==
+ (const llvm::fltSemantics *)&APFloatBase::semX87DoubleExtended);
assert(partCount()==2);
uint64_t myexponent, mysignificand;
@@ -3560,7 +3533,8 @@ APInt IEEEFloat::convertF80LongDoubleAPFloatToAPInt() const {
}
APInt IEEEFloat::convertPPCDoubleDoubleLegacyAPFloatToAPInt() const {
- assert(semantics == (const llvm::fltSemantics *)&semPPCDoubleDoubleLegacy);
+ assert(semantics ==
+ (const llvm::fltSemantics *)&APFloatBase::semPPCDoubleDoubleLegacy);
assert(partCount()==2);
uint64_t words[2];
@@ -3574,14 +3548,14 @@ APInt IEEEFloat::convertPPCDoubleDoubleLegacyAPFloatToAPInt() const {
// Declare fltSemantics before APFloat that uses it (and
// saves pointer to it) to ensure correct destruction order.
fltSemantics extendedSemantics = *semantics;
- extendedSemantics.minExponent = semIEEEdouble.minExponent;
+ extendedSemantics.minExponent = APFloatBase::semIEEEdouble.minExponent;
IEEEFloat extended(*this);
fs = extended.convert(extendedSemantics, rmNearestTiesToEven, &losesInfo);
assert(fs == opOK && !losesInfo);
(void)fs;
IEEEFloat u(extended);
- fs = u.convert(semIEEEdouble, rmNearestTiesToEven, &losesInfo);
+ fs = u.convert(APFloatBase::semIEEEdouble, rmNearestTiesToEven, &losesInfo);
assert(fs == opOK || fs == opInexact);
(void)fs;
words[0] = *u.convertDoubleAPFloatToAPInt().getRawData();
@@ -3597,7 +3571,7 @@ APInt IEEEFloat::convertPPCDoubleDoubleLegacyAPFloatToAPInt() const {
IEEEFloat v(extended);
v.subtract(u, rmNearestTiesToEven);
- fs = v.convert(semIEEEdouble, rmNearestTiesToEven, &losesInfo);
+ fs = v.convert(APFloatBase::semIEEEdouble, rmNearestTiesToEven, &losesInfo);
assert(fs == opOK && !losesInfo);
(void)fs;
words[1] = *v.convertDoubleAPFloatToAPInt().getRawData();
@@ -3611,8 +3585,9 @@ APInt IEEEFloat::convertPPCDoubleDoubleLegacyAPFloatToAPInt() const {
template <const fltSemantics &S>
APInt IEEEFloat::convertIEEEFloatToAPInt() const {
assert(semantics == &S);
- const int bias =
- (semantics == &semFloat8E8M0FNU) ? -S.minExponent : -(S.minExponent - 1);
+ const int bias = (semantics == &APFloatBase::semFloat8E8M0FNU)
+ ? -S.minExponent
+ : -(S.minExponent - 1);
constexpr unsigned int trailing_significand_bits = S.precision - 1;
constexpr int integer_bit_part = trailing_significand_bits / integerPartWidth;
constexpr integerPart integer_bit =
@@ -3677,87 +3652,87 @@ APInt IEEEFloat::convertIEEEFloatToAPInt() const {
APInt IEEEFloat::convertQuadrupleAPFloatToAPInt() const {
assert(partCount() == 2);
- return convertIEEEFloatToAPInt<semIEEEquad>();
+ return convertIEEEFloatToAPInt<APFloatBase::semIEEEquad>();
}
APInt IEEEFloat::convertDoubleAPFloatToAPInt() const {
assert(partCount()==1);
- return convertIEEEFloatToAPInt<semIEEEdouble>();
+ return convertIEEEFloatToAPInt<APFloatBase::semIEEEdouble>();
}
APInt IEEEFloat::convertFloatAPFloatToAPInt() const {
assert(partCount()==1);
- return convertIEEEFloatToAPInt<semIEEEsingle>();
+ return convertIEEEFloatToAPInt<APFloatBase::semIEEEsingle>();
}
APInt IEEEFloat::convertBFloatAPFloatToAPInt() const {
assert(partCount() == 1);
- return convertIEEEFloatToAPInt<semBFloat>();
+ return convertIEEEFloatToAPInt<APFloatBase::semBFloat>();
}
APInt IEEEFloat::convertHalfAPFloatToAPInt() const {
assert(partCount()==1);
- return convertIEEEFloatToAPInt<semIEEEhalf>();
+  return convertIEEEFloatToAPInt<APFloatBase::semIEEEhalf>();
}
APInt IEEEFloat::convertFloat8E5M2APFloatToAPInt() const {
assert(partCount() == 1);
- return convertIEEEFloatToAPInt<semFloat8E5M2>();
+ return convertIEEEFloatToAPInt<APFloatBase::semFloat8E5M2>();
}
APInt IEEEFloat::convertFloat8E5M2FNUZAPFloatToAPInt() const {
assert(partCount() == 1);
- return convertIEEEFloatToAPInt<semFloat8E5M2FNUZ>();
+ return convertIEEEFloatToAPInt<APFloatBase::semFloat8E5M2FNUZ>();
}
APInt IEEEFloat::convertFloat8E4M3APFloatToAPInt() const {
assert(partCount() == 1);
- return convertIEEEFloatToAPInt<semFloat8E4M3>();
+ return convertIEEEFloatToAPInt<APFloatBase::semFloat8E4M3>();
}
APInt IEEEFloat::convertFloat8E4M3FNAPFloatToAPInt() const {
assert(partCount() == 1);
- return convertIEEEFloatToAPInt<semFloat8E4M3FN>();
+ return convertIEEEFloatToAPInt<APFloatBase::semFloat8E4M3FN>();
}
APInt IEEEFloat::convertFloat8E4M3FNUZAPFloatToAPInt() const {
assert(partCount() == 1);
- return convertIEEEFloatToAPInt<semFloat8E4M3FNUZ>();
+ return convertIEEEFloatToAPInt<APFloatBase::semFloat8E4M3FNUZ>();
}
APInt IEEEFloat::convertFloat8E4M3B11FNUZAPFloatToAPInt() const {
assert(partCount() == 1);
- return convertIEEEFloatToAPInt<semFloat8E4M3B11FNUZ>();
+ return convertIEEEFloatToAPInt<APFloatBase::semFloat8E4M3B11FNUZ>();
}
APInt IEEEFloat::convertFloat8E3M4APFloatToAPInt() const {
assert(partCount() == 1);
- return convertIEEEFloatToAPInt<semFloat8E3M4>();
+ return convertIEEEFloatToAPInt<APFloatBase::semFloat8E3M4>();
}
APInt IEEEFloat::convertFloatTF32APFloatToAPInt() const {
assert(partCount() == 1);
- return convertIEEEFloatToAPInt<semFloatTF32>();
+ return convertIEEEFloatToAPInt<APFloatBase::semFloatTF32>();
}
APInt IEEEFloat::convertFloat8E8M0FNUAPFloatToAPInt() const {
assert(partCount() == 1);
- return convertIEEEFloatToAPInt<semFloat8E8M0FNU>();
+ return convertIEEEFloatToAPInt<APFloatBase::semFloat8E8M0FNU>();
}
APInt IEEEFloat::convertFloat6E3M2FNAPFloatToAPInt() const {
assert(partCount() == 1);
- return convertIEEEFloatToAPInt<semFloat6E3M2FN>();
+ return convertIEEEFloatToAPInt<APFloatBase::semFloat6E3M2FN>();
}
APInt IEEEFloat::convertFloat6E2M3FNAPFloatToAPInt() const {
assert(partCount() == 1);
- return convertIEEEFloatToAPInt<semFloat6E2M3FN>();
+ return convertIEEEFloatToAPInt<APFloatBase::semFloat6E2M3FN>();
}
APInt IEEEFloat::convertFloat4E2M1FNAPFloatToAPInt() const {
assert(partCount() == 1);
- return convertIEEEFloatToAPInt<semFloat4E2M1FN>();
+ return convertIEEEFloatToAPInt<APFloatBase::semFloat4E2M1FN>();
}
// This function creates an APInt that is just a bit map of the floating
@@ -3765,74 +3740,77 @@ APInt IEEEFloat::convertFloat4E2M1FNAPFloatToAPInt() const {
// and treating the result as a normal integer is unlikely to be useful.
APInt IEEEFloat::bitcastToAPInt() const {
- if (semantics == (const llvm::fltSemantics*)&semIEEEhalf)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semIEEEhalf)
return convertHalfAPFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semBFloat)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semBFloat)
return convertBFloatAPFloatToAPInt();
- if (semantics == (const llvm::fltSemantics*)&semIEEEsingle)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semIEEEsingle)
return convertFloatAPFloatToAPInt();
- if (semantics == (const llvm::fltSemantics*)&semIEEEdouble)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semIEEEdouble)
return convertDoubleAPFloatToAPInt();
- if (semantics == (const llvm::fltSemantics*)&semIEEEquad)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semIEEEquad)
return convertQuadrupleAPFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semPPCDoubleDoubleLegacy)
+ if (semantics ==
+ (const llvm::fltSemantics *)&APFloatBase::semPPCDoubleDoubleLegacy)
return convertPPCDoubleDoubleLegacyAPFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semFloat8E5M2)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semFloat8E5M2)
return convertFloat8E5M2APFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semFloat8E5M2FNUZ)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semFloat8E5M2FNUZ)
return convertFloat8E5M2FNUZAPFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semFloat8E4M3)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semFloat8E4M3)
return convertFloat8E4M3APFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semFloat8E4M3FN)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semFloat8E4M3FN)
return convertFloat8E4M3FNAPFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semFloat8E4M3FNUZ)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semFloat8E4M3FNUZ)
return convertFloat8E4M3FNUZAPFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semFloat8E4M3B11FNUZ)
+ if (semantics ==
+ (const llvm::fltSemantics *)&APFloatBase::semFloat8E4M3B11FNUZ)
return convertFloat8E4M3B11FNUZAPFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semFloat8E3M4)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semFloat8E3M4)
return convertFloat8E3M4APFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semFloatTF32)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semFloatTF32)
return convertFloatTF32APFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semFloat8E8M0FNU)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semFloat8E8M0FNU)
return convertFloat8E8M0FNUAPFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semFloat6E3M2FN)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semFloat6E3M2FN)
return convertFloat6E3M2FNAPFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semFloat6E2M3FN)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semFloat6E2M3FN)
return convertFloat6E2M3FNAPFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semFloat4E2M1FN)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semFloat4E2M1FN)
return convertFloat4E2M1FNAPFloatToAPInt();
- assert(semantics == (const llvm::fltSemantics*)&semX87DoubleExtended &&
+ assert(semantics ==
+ (const llvm::fltSemantics *)&APFloatBase::semX87DoubleExtended &&
"unknown format!");
return convertF80LongDoubleAPFloatToAPInt();
}
float IEEEFloat::convertToFloat() const {
- assert(semantics == (const llvm::fltSemantics*)&semIEEEsingle &&
+ assert(semantics == (const llvm::fltSemantics *)&APFloatBase::semIEEEsingle &&
"Float semantics are not IEEEsingle");
APInt api = bitcastToAPInt();
return api.bitsToFloat();
}
double IEEEFloat::convertToDouble() const {
- assert(semantics == (const llvm::fltSemantics*)&semIEEEdouble &&
+ assert(semantics == (const llvm::fltSemantics *)&APFloatBase::semIEEEdouble &&
"Float semantics are not IEEEdouble");
APInt api = bitcastToAPInt();
return api.bitsToDouble();
@@ -3840,7 +3818,7 @@ double IEEEFloat::convertToDouble() const {
#ifdef HAS_IEE754_FLOAT128
float128 IEEEFloat::convertToQuad() const {
- assert(semantics == (const llvm::fltSemantics *)&semIEEEquad &&
+ assert(semantics == (const llvm::fltSemantics *)&APFloatBase::semIEEEquad &&
"Float semantics are not IEEEquads");
APInt api = bitcastToAPInt();
return api.bitsToQuad();
@@ -3861,7 +3839,7 @@ void IEEEFloat::initFromF80LongDoubleAPInt(const APInt &api) {
uint64_t mysignificand = i1;
uint8_t myintegerbit = mysignificand >> 63;
- initialize(&semX87DoubleExtended);
+ initialize(&APFloatBase::semX87DoubleExtended);
assert(partCount()==2);
sign = static_cast<unsigned int>(i2>>15);
@@ -3893,14 +3871,16 @@ void IEEEFloat::initFromPPCDoubleDoubleLegacyAPInt(const APInt &api) {
// Get the first double and convert to our format.
initFromDoubleAPInt(APInt(64, i1));
- fs = convert(semPPCDoubleDoubleLegacy, rmNearestTiesToEven, &losesInfo);
+ fs = convert(APFloatBase::semPPCDoubleDoubleLegacy, rmNearestTiesToEven,
+ &losesInfo);
assert(fs == opOK && !losesInfo);
(void)fs;
// Unless we have a special case, add in second double.
if (isFiniteNonZero()) {
- IEEEFloat v(semIEEEdouble, APInt(64, i2));
- fs = v.convert(semPPCDoubleDoubleLegacy, rmNearestTiesToEven, &losesInfo);
+ IEEEFloat v(APFloatBase::semIEEEdouble, APInt(64, i2));
+ fs = v.convert(APFloatBase::semPPCDoubleDoubleLegacy, rmNearestTiesToEven,
+ &losesInfo);
assert(fs == opOK && !losesInfo);
(void)fs;
@@ -3918,7 +3898,7 @@ void IEEEFloat::initFromFloat8E8M0FNUAPInt(const APInt &api) {
uint64_t val = api.getRawData()[0];
uint64_t myexponent = (val & exponent_mask);
- initialize(&semFloat8E8M0FNU);
+ initialize(&APFloatBase::semFloat8E8M0FNU);
assert(partCount() == 1);
// This format has unsigned representation only
@@ -4025,109 +4005,109 @@ void IEEEFloat::initFromIEEEAPInt(const APInt &api) {
}
void IEEEFloat::initFromQuadrupleAPInt(const APInt &api) {
- initFromIEEEAPInt<semIEEEquad>(api);
+ initFromIEEEAPInt<APFloatBase::semIEEEquad>(api);
}
void IEEEFloat::initFromDoubleAPInt(const APInt &api) {
- initFromIEEEAPInt<semIEEEdouble>(api);
+ initFromIEEEAPInt<APFloatBase::semIEEEdouble>(api);
}
void IEEEFloat::initFromFloatAPInt(const APInt &api) {
- initFromIEEEAPInt<semIEEEsingle>(api);
+ initFromIEEEAPInt<APFloatBase::semIEEEsingle>(api);
}
void IEEEFloat::initFromBFloatAPInt(const APInt &api) {
- initFromIEEEAPInt<semBFloat>(api);
+ initFromIEEEAPInt<APFloatBase::semBFloat>(api);
}
void IEEEFloat::initFromHalfAPInt(const APInt &api) {
- initFromIEEEAPInt<semIEEEhalf>(api);
+ initFromIEEEAPInt<APFloatBase::semIEEEhalf>(api);
}
void IEEEFloat::initFromFloat8E5M2APInt(const APInt &api) {
- initFromIEEEAPInt<semFloat8E5M2>(api);
+ initFromIEEEAPInt<APFloatBase::semFloat8E5M2>(api);
}
void IEEEFloat::initFromFloat8E5M2FNUZAPInt(const APInt &api) {
- initFromIEEEAPInt<semFloat8E5M2FNUZ>(api);
+ initFromIEEEAPInt<APFloatBase::semFloat8E5M2FNUZ>(api);
}
void IEEEFloat::initFromFloat8E4M3APInt(const APInt &api) {
- initFromIEEEAPInt<semFloat8E4M3>(api);
+ initFromIEEEAPInt<APFloatBase::semFloat8E4M3>(api);
}
void IEEEFloat::initFromFloat8E4M3FNAPInt(const APInt &api) {
- initFromIEEEAPInt<semFloat8E4M3FN>(api);
+ initFromIEEEAPInt<APFloatBase::semFloat8E4M3FN>(api);
}
void IEEEFloat::initFromFloat8E4M3FNUZAPInt(const APInt &api) {
- initFromIEEEAPInt<semFloat8E4M3FNUZ>(api);
+ initFromIEEEAPInt<APFloatBase::semFloat8E4M3FNUZ>(api);
}
void IEEEFloat::initFromFloat8E4M3B11FNUZAPInt(const APInt &api) {
- initFromIEEEAPInt<semFloat8E4M3B11FNUZ>(api);
+ initFromIEEEAPInt<APFloatBase::semFloat8E4M3B11FNUZ>(api);
}
void IEEEFloat::initFromFloat8E3M4APInt(const APInt &api) {
- initFromIEEEAPInt<semFloat8E3M4>(api);
+ initFromIEEEAPInt<APFloatBase::semFloat8E3M4>(api);
}
void IEEEFloat::initFromFloatTF32APInt(const APInt &api) {
- initFromIEEEAPInt<semFloatTF32>(api);
+ initFromIEEEAPInt<APFloatBase::semFloatTF32>(api);
}
void IEEEFloat::initFromFloat6E3M2FNAPInt(const APInt &api) {
- initFromIEEEAPInt<semFloat6E3M2FN>(api);
+ initFromIEEEAPInt<APFloatBase::semFloat6E3M2FN>(api);
}
void IEEEFloat::initFromFloat6E2M3FNAPInt(const APInt &api) {
- initFromIEEEAPInt<semFloat6E2M3FN>(api);
+ initFromIEEEAPInt<APFloatBase::semFloat6E2M3FN>(api);
}
void IEEEFloat::initFromFloat4E2M1FNAPInt(const APInt &api) {
- initFromIEEEAPInt<semFloat4E2M1FN>(api);
+ initFromIEEEAPInt<APFloatBase::semFloat4E2M1FN>(api);
}
/// Treat api as containing the bits of a floating point number.
void IEEEFloat::initFromAPInt(const fltSemantics *Sem, const APInt &api) {
assert(api.getBitWidth() == Sem->sizeInBits);
- if (Sem == &semIEEEhalf)
+ if (Sem == &APFloatBase::semIEEEhalf)
return initFromHalfAPInt(api);
- if (Sem == &semBFloat)
+ if (Sem == &APFloatBase::semBFloat)
return initFromBFloatAPInt(api);
- if (Sem == &semIEEEsingle)
+ if (Sem == &APFloatBase::semIEEEsingle)
return initFromFloatAPInt(api);
- if (Sem == &semIEEEdouble)
+ if (Sem == &APFloatBase::semIEEEdouble)
return initFromDoubleAPInt(api);
- if (Sem == &semX87DoubleExtended)
+ if (Sem == &APFloatBase::semX87DoubleExtended)
return initFromF80LongDoubleAPInt(api);
- if (Sem == &semIEEEquad)
+ if (Sem == &APFloatBase::semIEEEquad)
return initFromQuadrupleAPInt(api);
- if (Sem == &semPPCDoubleDoubleLegacy)
+ if (Sem == &APFloatBase::semPPCDoubleDoubleLegacy)
return initFromPPCDoubleDoubleLegacyAPInt(api);
- if (Sem == &semFloat8E5M2)
+ if (Sem == &APFloatBase::semFloat8E5M2)
return initFromFloat8E5M2APInt(api);
- if (Sem == &semFloat8E5M2FNUZ)
+ if (Sem == &APFloatBase::semFloat8E5M2FNUZ)
return initFromFloat8E5M2FNUZAPInt(api);
- if (Sem == &semFloat8E4M3)
+ if (Sem == &APFloatBase::semFloat8E4M3)
return initFromFloat8E4M3APInt(api);
- if (Sem == &semFloat8E4M3FN)
+ if (Sem == &APFloatBase::semFloat8E4M3FN)
return initFromFloat8E4M3FNAPInt(api);
- if (Sem == &semFloat8E4M3FNUZ)
+ if (Sem == &APFloatBase::semFloat8E4M3FNUZ)
return initFromFloat8E4M3FNUZAPInt(api);
- if (Sem == &semFloat8E4M3B11FNUZ)
+ if (Sem == &APFloatBase::semFloat8E4M3B11FNUZ)
return initFromFloat8E4M3B11FNUZAPInt(api);
- if (Sem == &semFloat8E3M4)
+ if (Sem == &APFloatBase::semFloat8E3M4)
return initFromFloat8E3M4APInt(api);
- if (Sem == &semFloatTF32)
+ if (Sem == &APFloatBase::semFloatTF32)
return initFromFloatTF32APInt(api);
- if (Sem == &semFloat8E8M0FNU)
+ if (Sem == &APFloatBase::semFloat8E8M0FNU)
return initFromFloat8E8M0FNUAPInt(api);
- if (Sem == &semFloat6E3M2FN)
+ if (Sem == &APFloatBase::semFloat6E3M2FN)
return initFromFloat6E3M2FNAPInt(api);
- if (Sem == &semFloat6E2M3FN)
+ if (Sem == &APFloatBase::semFloat6E2M3FN)
return initFromFloat6E2M3FNAPInt(api);
- if (Sem == &semFloat4E2M1FN)
+ if (Sem == &APFloatBase::semFloat4E2M1FN)
return initFromFloat4E2M1FNAPInt(api);
llvm_unreachable("unsupported semantics");
@@ -4202,11 +4182,11 @@ IEEEFloat::IEEEFloat(const fltSemantics &Sem, const APInt &API) {
}
IEEEFloat::IEEEFloat(float f) {
- initFromAPInt(&semIEEEsingle, APInt::floatToBits(f));
+ initFromAPInt(&APFloatBase::semIEEEsingle, APInt::floatToBits(f));
}
IEEEFloat::IEEEFloat(double d) {
- initFromAPInt(&semIEEEdouble, APInt::doubleToBits(d));
+ initFromAPInt(&APFloatBase::semIEEEdouble, APInt::doubleToBits(d));
}
namespace {
@@ -4815,38 +4795,40 @@ IEEEFloat frexp(const IEEEFloat &Val, int &Exp, roundingMode RM) {
DoubleAPFloat::DoubleAPFloat(const fltSemantics &S)
: Semantics(&S),
- Floats(new APFloat[2]{APFloat(semIEEEdouble), APFloat(semIEEEdouble)}) {
- assert(Semantics == &semPPCDoubleDouble);
+ Floats(new APFloat[2]{APFloat(APFloatBase::semIEEEdouble),
+ APFloat(APFloatBase::semIEEEdouble)}) {
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble);
}
DoubleAPFloat::DoubleAPFloat(const fltSemantics &S, uninitializedTag)
- : Semantics(&S),
- Floats(new APFloat[2]{APFloat(semIEEEdouble, uninitialized),
- APFloat(semIEEEdouble, uninitialized)}) {
- assert(Semantics == &semPPCDoubleDouble);
+ : Semantics(&S), Floats(new APFloat[2]{
+ APFloat(APFloatBase::semIEEEdouble, uninitialized),
+ APFloat(APFloatBase::semIEEEdouble, uninitialized)}) {
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble);
}
DoubleAPFloat::DoubleAPFloat(const fltSemantics &S, integerPart I)
- : Semantics(&S), Floats(new APFloat[2]{APFloat(semIEEEdouble, I),
- APFloat(semIEEEdouble)}) {
- assert(Semantics == &semPPCDoubleDouble);
+ : Semantics(&S),
+ Floats(new APFloat[2]{APFloat(APFloatBase::semIEEEdouble, I),
+ APFloat(APFloatBase::semIEEEdouble)}) {
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble);
}
DoubleAPFloat::DoubleAPFloat(const fltSemantics &S, const APInt &I)
: Semantics(&S),
Floats(new APFloat[2]{
- APFloat(semIEEEdouble, APInt(64, I.getRawData()[0])),
- APFloat(semIEEEdouble, APInt(64, I.getRawData()[1]))}) {
- assert(Semantics == &semPPCDoubleDouble);
+ APFloat(APFloatBase::semIEEEdouble, APInt(64, I.getRawData()[0])),
+ APFloat(APFloatBase::semIEEEdouble, APInt(64, I.getRawData()[1]))}) {
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble);
}
DoubleAPFloat::DoubleAPFloat(const fltSemantics &S, APFloat &&First,
APFloat &&Second)
: Semantics(&S),
Floats(new APFloat[2]{std::move(First), std::move(Second)}) {
- assert(Semantics == &semPPCDoubleDouble);
- assert(&Floats[0].getSemantics() == &semIEEEdouble);
- assert(&Floats[1].getSemantics() == &semIEEEdouble);
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble);
+ assert(&Floats[0].getSemantics() == &APFloatBase::semIEEEdouble);
+ assert(&Floats[1].getSemantics() == &APFloatBase::semIEEEdouble);
}
DoubleAPFloat::DoubleAPFloat(const DoubleAPFloat &RHS)
@@ -4854,14 +4836,14 @@ DoubleAPFloat::DoubleAPFloat(const DoubleAPFloat &RHS)
Floats(RHS.Floats ? new APFloat[2]{APFloat(RHS.Floats[0]),
APFloat(RHS.Floats[1])}
: nullptr) {
- assert(Semantics == &semPPCDoubleDouble);
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble);
}
DoubleAPFloat::DoubleAPFloat(DoubleAPFloat &&RHS)
: Semantics(RHS.Semantics), Floats(RHS.Floats) {
- RHS.Semantics = &semBogus;
+ RHS.Semantics = &APFloatBase::semBogus;
RHS.Floats = nullptr;
- assert(Semantics == &semPPCDoubleDouble);
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble);
}
DoubleAPFloat &DoubleAPFloat::operator=(const DoubleAPFloat &RHS) {
@@ -5009,12 +4991,12 @@ APFloat::opStatus DoubleAPFloat::addWithSpecial(const DoubleAPFloat &LHS,
APFloat A(LHS.Floats[0]), AA(LHS.Floats[1]), C(RHS.Floats[0]),
CC(RHS.Floats[1]);
- assert(&A.getSemantics() == &semIEEEdouble);
- assert(&AA.getSemantics() == &semIEEEdouble);
- assert(&C.getSemantics() == &semIEEEdouble);
- assert(&CC.getSemantics() == &semIEEEdouble);
- assert(&Out.Floats[0].getSemantics() == &semIEEEdouble);
- assert(&Out.Floats[1].getSemantics() == &semIEEEdouble);
+ assert(&A.getSemantics() == &APFloatBase::semIEEEdouble);
+ assert(&AA.getSemantics() == &APFloatBase::semIEEEdouble);
+ assert(&C.getSemantics() == &APFloatBase::semIEEEdouble);
+ assert(&CC.getSemantics() == &APFloatBase::semIEEEdouble);
+ assert(&Out.Floats[0].getSemantics() == &APFloatBase::semIEEEdouble);
+ assert(&Out.Floats[1].getSemantics() == &APFloatBase::semIEEEdouble);
return Out.addImpl(A, AA, C, CC, RM);
}
@@ -5119,28 +5101,32 @@ APFloat::opStatus DoubleAPFloat::multiply(const DoubleAPFloat &RHS,
APFloat::opStatus DoubleAPFloat::divide(const DoubleAPFloat &RHS,
APFloat::roundingMode RM) {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
- APFloat Tmp(semPPCDoubleDoubleLegacy, bitcastToAPInt());
- auto Ret =
- Tmp.divide(APFloat(semPPCDoubleDoubleLegacy, RHS.bitcastToAPInt()), RM);
- *this = DoubleAPFloat(semPPCDoubleDouble, Tmp.bitcastToAPInt());
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
+ APFloat Tmp(APFloatBase::semPPCDoubleDoubleLegacy, bitcastToAPInt());
+ auto Ret = Tmp.divide(
+ APFloat(APFloatBase::semPPCDoubleDoubleLegacy, RHS.bitcastToAPInt()), RM);
+ *this = DoubleAPFloat(APFloatBase::semPPCDoubleDouble, Tmp.bitcastToAPInt());
return Ret;
}
APFloat::opStatus DoubleAPFloat::remainder(const DoubleAPFloat &RHS) {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
- APFloat Tmp(semPPCDoubleDoubleLegacy, bitcastToAPInt());
- auto Ret =
- Tmp.remainder(APFloat(semPPCDoubleDoubleLegacy, RHS.bitcastToAPInt()));
- *this = DoubleAPFloat(semPPCDoubleDouble, Tmp.bitcastToAPInt());
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
+ APFloat Tmp(APFloatBase::semPPCDoubleDoubleLegacy, bitcastToAPInt());
+ auto Ret = Tmp.remainder(
+ APFloat(APFloatBase::semPPCDoubleDoubleLegacy, RHS.bitcastToAPInt()));
+ *this = DoubleAPFloat(APFloatBase::semPPCDoubleDouble, Tmp.bitcastToAPInt());
return Ret;
}
APFloat::opStatus DoubleAPFloat::mod(const DoubleAPFloat &RHS) {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
- APFloat Tmp(semPPCDoubleDoubleLegacy, bitcastToAPInt());
- auto Ret = Tmp.mod(APFloat(semPPCDoubleDoubleLegacy, RHS.bitcastToAPInt()));
- *this = DoubleAPFloat(semPPCDoubleDouble, Tmp.bitcastToAPInt());
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
+ APFloat Tmp(APFloatBase::semPPCDoubleDoubleLegacy, bitcastToAPInt());
+ auto Ret = Tmp.mod(
+ APFloat(APFloatBase::semPPCDoubleDoubleLegacy, RHS.bitcastToAPInt()));
+ *this = DoubleAPFloat(APFloatBase::semPPCDoubleDouble, Tmp.bitcastToAPInt());
return Ret;
}
@@ -5148,17 +5134,21 @@ APFloat::opStatus
DoubleAPFloat::fusedMultiplyAdd(const DoubleAPFloat &Multiplicand,
const DoubleAPFloat &Addend,
APFloat::roundingMode RM) {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
- APFloat Tmp(semPPCDoubleDoubleLegacy, bitcastToAPInt());
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
+ APFloat Tmp(APFloatBase::semPPCDoubleDoubleLegacy, bitcastToAPInt());
auto Ret = Tmp.fusedMultiplyAdd(
- APFloat(semPPCDoubleDoubleLegacy, Multiplicand.bitcastToAPInt()),
- APFloat(semPPCDoubleDoubleLegacy, Addend.bitcastToAPInt()), RM);
- *this = DoubleAPFloat(semPPCDoubleDouble, Tmp.bitcastToAPInt());
+ APFloat(APFloatBase::semPPCDoubleDoubleLegacy,
+ Multiplicand.bitcastToAPInt()),
+ APFloat(APFloatBase::semPPCDoubleDoubleLegacy, Addend.bitcastToAPInt()),
+ RM);
+ *this = DoubleAPFloat(APFloatBase::semPPCDoubleDouble, Tmp.bitcastToAPInt());
return Ret;
}
APFloat::opStatus DoubleAPFloat::roundToIntegral(APFloat::roundingMode RM) {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
const APFloat &Hi = getFirst();
const APFloat &Lo = getSecond();
@@ -5309,22 +5299,28 @@ void DoubleAPFloat::makeZero(bool Neg) {
}
void DoubleAPFloat::makeLargest(bool Neg) {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
- Floats[0] = APFloat(semIEEEdouble, APInt(64, 0x7fefffffffffffffull));
- Floats[1] = APFloat(semIEEEdouble, APInt(64, 0x7c8ffffffffffffeull));
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
+ Floats[0] =
+ APFloat(APFloatBase::semIEEEdouble, APInt(64, 0x7fefffffffffffffull));
+ Floats[1] =
+ APFloat(APFloatBase::semIEEEdouble, APInt(64, 0x7c8ffffffffffffeull));
if (Neg)
changeSign();
}
void DoubleAPFloat::makeSmallest(bool Neg) {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
Floats[0].makeSmallest(Neg);
Floats[1].makeZero(/* Neg = */ false);
}
void DoubleAPFloat::makeSmallestNormalized(bool Neg) {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
- Floats[0] = APFloat(semIEEEdouble, APInt(64, 0x0360000000000000ull));
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
+ Floats[0] =
+ APFloat(APFloatBase::semIEEEdouble, APInt(64, 0x0360000000000000ull));
if (Neg)
Floats[0].changeSign();
Floats[1].makeZero(/* Neg = */ false);
@@ -5355,7 +5351,8 @@ hash_code hash_value(const DoubleAPFloat &Arg) {
}
APInt DoubleAPFloat::bitcastToAPInt() const {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
uint64_t Data[] = {
Floats[0].bitcastToAPInt().getRawData()[0],
Floats[1].bitcastToAPInt().getRawData()[0],
@@ -5365,10 +5362,11 @@ APInt DoubleAPFloat::bitcastToAPInt() const {
Expected<APFloat::opStatus> DoubleAPFloat::convertFromString(StringRef S,
roundingMode RM) {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
- APFloat Tmp(semPPCDoubleDoubleLegacy);
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
+ APFloat Tmp(APFloatBase::semPPCDoubleDoubleLegacy);
auto Ret = Tmp.convertFromString(S, RM);
- *this = DoubleAPFloat(semPPCDoubleDouble, Tmp.bitcastToAPInt());
+ *this = DoubleAPFloat(APFloatBase::semPPCDoubleDouble, Tmp.bitcastToAPInt());
return Ret;
}
@@ -5379,7 +5377,8 @@ Expected<APFloat::opStatus> DoubleAPFloat::convertFromString(StringRef S,
// nextUp must choose the smallest output > input that follows these rules.
// nexDown must choose the largest output < input that follows these rules.
APFloat::opStatus DoubleAPFloat::next(bool nextDown) {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
// nextDown(x) = -nextUp(-x)
if (nextDown) {
changeSign();
@@ -5481,7 +5480,8 @@ APFloat::opStatus DoubleAPFloat::next(bool nextDown) {
APFloat::opStatus DoubleAPFloat::convertToSignExtendedInteger(
MutableArrayRef<integerPart> Input, unsigned int Width, bool IsSigned,
roundingMode RM, bool *IsExact) const {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
// If Hi is not finite, or Lo is zero, the value is entirely represented
// by Hi. Delegate to the simpler single-APFloat conversion.
@@ -5761,8 +5761,9 @@ unsigned int DoubleAPFloat::convertToHexString(char *DST,
unsigned int HexDigits,
bool UpperCase,
roundingMode RM) const {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
- return APFloat(semPPCDoubleDoubleLegacy, bitcastToAPInt())
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
+ return APFloat(APFloatBase::semPPCDoubleDoubleLegacy, bitcastToAPInt())
.convertToHexString(DST, HexDigits, UpperCase, RM);
}
@@ -5799,7 +5800,8 @@ bool DoubleAPFloat::isLargest() const {
}
bool DoubleAPFloat::isInteger() const {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
return Floats[0].isInteger() && Floats[1].isInteger();
}
@@ -5807,8 +5809,9 @@ void DoubleAPFloat::toString(SmallVectorImpl<char> &Str,
unsigned FormatPrecision,
unsigned FormatMaxPadding,
bool TruncateZero) const {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
- APFloat(semPPCDoubleDoubleLegacy, bitcastToAPInt())
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
+ APFloat(APFloatBase::semPPCDoubleDoubleLegacy, bitcastToAPInt())
.toString(Str, FormatPrecision, FormatMaxPadding, TruncateZero);
}
@@ -5840,14 +5843,17 @@ int ilogb(const DoubleAPFloat &Arg) {
DoubleAPFloat scalbn(const DoubleAPFloat &Arg, int Exp,
APFloat::roundingMode RM) {
- assert(Arg.Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
- return DoubleAPFloat(semPPCDoubleDouble, scalbn(Arg.Floats[0], Exp, RM),
+ assert(Arg.Semantics == &APFloatBase::PPCDoubleDouble() &&
+ "Unexpected Semantics");
+ return DoubleAPFloat(APFloatBase::PPCDoubleDouble(),
+ scalbn(Arg.Floats[0], Exp, RM),
scalbn(Arg.Floats[1], Exp, RM));
}
DoubleAPFloat frexp(const DoubleAPFloat &Arg, int &Exp,
APFloat::roundingMode RM) {
- assert(Arg.Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
+ assert(Arg.Semantics == &APFloatBase::PPCDoubleDouble() &&
+ "Unexpected Semantics");
// Get the unbiased exponent e of the number, where |Arg| = m * 2^e for m in
// [1.0, 2.0).
@@ -5943,7 +5949,8 @@ DoubleAPFloat frexp(const DoubleAPFloat &Arg, int &Exp,
}
APFloat First = scalbn(Hi, -Exp, RM);
- return DoubleAPFloat(semPPCDoubleDouble, std::move(First), std::move(Second));
+ return DoubleAPFloat(APFloatBase::PPCDoubleDouble(), std::move(First),
+ std::move(Second));
}
} // namespace detail
@@ -5955,9 +5962,8 @@ APFloat::Storage::Storage(IEEEFloat F, const fltSemantics &Semantics) {
}
if (usesLayout<DoubleAPFloat>(Semantics)) {
const fltSemantics& S = F.getSemantics();
- new (&Double)
- DoubleAPFloat(Semantics, APFloat(std::move(F), S),
- APFloat(semIEEEdouble));
+ new (&Double) DoubleAPFloat(Semantics, APFloat(std::move(F), S),
+ APFloat(APFloatBase::IEEEdouble()));
return;
}
llvm_unreachable("Unexpected semantics");
@@ -6065,8 +6071,9 @@ APFloat::opStatus APFloat::convert(const fltSemantics &ToSemantics,
return U.IEEE.convert(ToSemantics, RM, losesInfo);
if (usesLayout<IEEEFloat>(getSemantics()) &&
usesLayout<DoubleAPFloat>(ToSemantics)) {
- assert(&ToSemantics == &semPPCDoubleDouble);
- auto Ret = U.IEEE.convert(semPPCDoubleDoubleLegacy, RM, losesInfo);
+ assert(&ToSemantics == &APFloatBase::semPPCDoubleDouble);
+ auto Ret =
+ U.IEEE.convert(APFloatBase::semPPCDoubleDoubleLegacy, RM, losesInfo);
*this = APFloat(ToSemantics, U.IEEE.bitcastToAPInt());
return Ret;
}
@@ -6113,13 +6120,15 @@ APFloat::opStatus APFloat::convertToInteger(APSInt &result,
}
double APFloat::convertToDouble() const {
- if (&getSemantics() == (const llvm::fltSemantics *)&semIEEEdouble)
+ if (&getSemantics() ==
+ (const llvm::fltSemantics *)&APFloatBase::semIEEEdouble)
return getIEEE().convertToDouble();
assert(isRepresentableBy(getSemantics(), semIEEEdouble) &&
"Float semantics is not representable by IEEEdouble");
APFloat Temp = *this;
bool LosesInfo;
- opStatus St = Temp.convert(semIEEEdouble, rmNearestTiesToEven, &LosesInfo);
+ opStatus St =
+ Temp.convert(APFloatBase::semIEEEdouble, rmNearestTiesToEven, &LosesInfo);
assert(!(St & opInexact) && !LosesInfo && "Unexpected imprecision");
(void)St;
return Temp.getIEEE().convertToDouble();
@@ -6127,13 +6136,14 @@ double APFloat::convertToDouble() const {
#ifdef HAS_IEE754_FLOAT128
float128 APFloat::convertToQuad() const {
- if (&getSemantics() == (const llvm::fltSemantics *)&semIEEEquad)
+ if (&getSemantics() == (const llvm::fltSemantics *)&APFloatBase::semIEEEquad)
return getIEEE().convertToQuad();
assert(isRepresentableBy(getSemantics(), semIEEEquad) &&
"Float semantics is not representable by IEEEquad");
APFloat Temp = *this;
bool LosesInfo;
- opStatus St = Temp.convert(semIEEEquad, rmNearestTiesToEven, &LosesInfo);
+ opStatus St =
+ Temp.convert(APFloatBase::semIEEEquad, rmNearestTiesToEven, &LosesInfo);
assert(!(St & opInexact) && !LosesInfo && "Unexpected imprecision");
(void)St;
return Temp.getIEEE().convertToQuad();
@@ -6141,18 +6151,84 @@ float128 APFloat::convertToQuad() const {
#endif
float APFloat::convertToFloat() const {
- if (&getSemantics() == (const llvm::fltSemantics *)&semIEEEsingle)
+ if (&getSemantics() ==
+ (const llvm::fltSemantics *)&APFloatBase::semIEEEsingle)
return getIEEE().convertToFloat();
assert(isRepresentableBy(getSemantics(), semIEEEsingle) &&
"Float semantics is not representable by IEEEsingle");
APFloat Temp = *this;
bool LosesInfo;
- opStatus St = Temp.convert(semIEEEsingle, rmNearestTiesToEven, &LosesInfo);
+ opStatus St =
+ Temp.convert(APFloatBase::semIEEEsingle, rmNearestTiesToEven, &LosesInfo);
assert(!(St & opInexact) && !LosesInfo && "Unexpected imprecision");
(void)St;
return Temp.getIEEE().convertToFloat();
}
+APFloat::Storage::~Storage() {
+ if (usesLayout<IEEEFloat>(*semantics)) {
+ IEEE.~IEEEFloat();
+ return;
+ }
+ if (usesLayout<DoubleAPFloat>(*semantics)) {
+ Double.~DoubleAPFloat();
+ return;
+ }
+ llvm_unreachable("Unexpected semantics");
+}
+
+APFloat::Storage::Storage(const APFloat::Storage &RHS) {
+ if (usesLayout<IEEEFloat>(*RHS.semantics)) {
+ new (this) IEEEFloat(RHS.IEEE);
+ return;
+ }
+ if (usesLayout<DoubleAPFloat>(*RHS.semantics)) {
+ new (this) DoubleAPFloat(RHS.Double);
+ return;
+ }
+ llvm_unreachable("Unexpected semantics");
+}
+
+APFloat::Storage::Storage(APFloat::Storage &&RHS) {
+ if (usesLayout<IEEEFloat>(*RHS.semantics)) {
+ new (this) IEEEFloat(std::move(RHS.IEEE));
+ return;
+ }
+ if (usesLayout<DoubleAPFloat>(*RHS.semantics)) {
+ new (this) DoubleAPFloat(std::move(RHS.Double));
+ return;
+ }
+ llvm_unreachable("Unexpected semantics");
+}
+
+APFloat::Storage &APFloat::Storage::operator=(const APFloat::Storage &RHS) {
+ if (usesLayout<IEEEFloat>(*semantics) &&
+ usesLayout<IEEEFloat>(*RHS.semantics)) {
+ IEEE = RHS.IEEE;
+ } else if (usesLayout<DoubleAPFloat>(*semantics) &&
+ usesLayout<DoubleAPFloat>(*RHS.semantics)) {
+ Double = RHS.Double;
+ } else if (this != &RHS) {
+ this->~Storage();
+ new (this) Storage(RHS);
+ }
+ return *this;
+}
+
+APFloat::Storage &APFloat::Storage::operator=(APFloat::Storage &&RHS) {
+ if (usesLayout<IEEEFloat>(*semantics) &&
+ usesLayout<IEEEFloat>(*RHS.semantics)) {
+ IEEE = std::move(RHS.IEEE);
+ } else if (usesLayout<DoubleAPFloat>(*semantics) &&
+ usesLayout<DoubleAPFloat>(*RHS.semantics)) {
+ Double = std::move(RHS.Double);
+ } else if (this != &RHS) {
+ this->~Storage();
+ new (this) Storage(std::move(RHS));
+ }
+ return *this;
+}
+
} // namespace llvm
#undef APFLOAT_DISPATCH_ON_SEMANTICS
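
Note on the APFloat::Storage special members added above: they follow a manual tagged-union discipline in which the active member is selected by the semantics layout, destroyed explicitly, and recreated with placement new. The standalone sketch below illustrates only that pattern, with hypothetical Small/Big payload types and a bool tag standing in for usesLayout<>; it is an illustration, not the LLVM code.

    #include <new>
    #include <string>
    #include <utility>

    // Hypothetical stand-ins for the two layouts (IEEEFloat vs. DoubleAPFloat).
    struct Small { int v; };
    struct Big { std::string s; };

    struct Storage {
      bool isSmall; // stands in for usesLayout<...>(*semantics)
      union {
        Small S;
        Big B;
      };

      explicit Storage(int v) : isSmall(true), S{v} {}
      explicit Storage(std::string s) : isSmall(false), B{std::move(s)} {}

      ~Storage() {
        if (isSmall) { S.~Small(); return; }
        B.~Big();
      }

      Storage(const Storage &RHS) : isSmall(RHS.isSmall) {
        if (isSmall)
          new (&S) Small(RHS.S);
        else
          new (&B) Big(RHS.B);
      }

      Storage &operator=(const Storage &RHS) {
        if (isSmall && RHS.isSmall) {
          S = RHS.S; // same active member: plain assignment
        } else if (!isSmall && !RHS.isSmall) {
          B = RHS.B;
        } else if (this != &RHS) {
          this->~Storage(); // active members differ: destroy and rebuild
          new (this) Storage(RHS); // the constructor resets the tag as well
        }
        return *this;
      }
    };
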
diff --git a/llvm/lib/Support/SourceMgr.cpp b/llvm/lib/Support/SourceMgr.cpp
index f2bbaab..299615a 100644
--- a/llvm/lib/Support/SourceMgr.cpp
+++ b/llvm/lib/Support/SourceMgr.cpp
@@ -69,11 +69,11 @@ unsigned SourceMgr::AddIncludeFile(const std::string &Filename,
ErrorOr<std::unique_ptr<MemoryBuffer>>
SourceMgr::OpenIncludeFile(const std::string &Filename,
std::string &IncludedFile) {
- if (!FS)
- reportFatalInternalError("Opening include file from SourceMgr without VFS");
+ auto GetFile = [this](StringRef Path) {
+ return FS ? FS->getBufferForFile(Path) : MemoryBuffer::getFile(Path);
+ };
- ErrorOr<std::unique_ptr<MemoryBuffer>> NewBufOrErr =
- FS->getBufferForFile(Filename);
+ ErrorOr<std::unique_ptr<MemoryBuffer>> NewBufOrErr = GetFile(Filename);
SmallString<64> Buffer(Filename);
// If the file didn't exist directly, see if it's in an include path.
@@ -81,7 +81,7 @@ SourceMgr::OpenIncludeFile(const std::string &Filename,
++i) {
Buffer = IncludeDirectories[i];
sys::path::append(Buffer, Filename);
- NewBufOrErr = FS->getBufferForFile(Buffer);
+ NewBufOrErr = GetFile(Buffer);
}
if (NewBufOrErr)
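
The SourceMgr change above replaces the hard error on a missing VFS with a lambda that falls back to the real filesystem. A minimal sketch of that fallback shape, using a hypothetical VFS type and plain iostreams rather than the LLVM MemoryBuffer/VFS API:

    #include <fstream>
    #include <iterator>
    #include <optional>
    #include <string>

    // Hypothetical stand-in for an optional virtual filesystem.
    struct VFS {
      std::optional<std::string> read(const std::string &Path) const {
        (void)Path;
        return std::nullopt; // a real VFS would serve the file from its overlay
      }
    };

    // Mirrors the GetFile lambda in the patch: prefer the VFS when one is
    // configured, otherwise fall back to the real filesystem instead of failing.
    std::optional<std::string> getFile(const VFS *FS, const std::string &Path) {
      if (FS)
        return FS->read(Path);
      std::ifstream In(Path, std::ios::binary);
      if (!In)
        return std::nullopt;
      return std::string(std::istreambuf_iterator<char>(In),
                         std::istreambuf_iterator<char>());
    }
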
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 1b559a6..8ed4062 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -514,8 +514,8 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
MVT::i64, Custom);
setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
- setOperationAction({ISD::SMIN, ISD::UMIN, ISD::SMAX, ISD::UMAX}, MVT::i32,
- Legal);
+ setOperationAction({ISD::ABS, ISD::SMIN, ISD::UMIN, ISD::SMAX, ISD::UMAX},
+ MVT::i32, Legal);
setOperationAction(
{ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF, ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF},
diff --git a/llvm/lib/Target/AMDGPU/DSInstructions.td b/llvm/lib/Target/AMDGPU/DSInstructions.td
index d0ad120..b841171 100644
--- a/llvm/lib/Target/AMDGPU/DSInstructions.td
+++ b/llvm/lib/Target/AMDGPU/DSInstructions.td
@@ -1488,6 +1488,12 @@ let AssemblerPredicate = isGFX12Plus in {
def : MnemonicAlias<"ds_load_tr_b64", "ds_load_tr8_b64">, Requires<[isGFX1250Plus]>;
def : MnemonicAlias<"ds_load_tr_b128", "ds_load_tr16_b128">, Requires<[isGFX1250Plus]>;
+// Additional aliases for ds load transpose instructions.
+def : MnemonicAlias<"ds_load_b64_tr_b8", "ds_load_tr8_b64">, Requires<[isGFX125xOnly]>;
+def : MnemonicAlias<"ds_load_b128_tr_b16", "ds_load_tr16_b128">, Requires<[isGFX125xOnly]>;
+def : MnemonicAlias<"ds_load_b64_tr_b4", "ds_load_tr4_b64">, Requires<[isGFX125xOnly]>;
+def : MnemonicAlias<"ds_load_b96_tr_b6", "ds_load_tr6_b96">, Requires<[isGFX125xOnly]>;
+
//===----------------------------------------------------------------------===//
// GFX11.
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
index e0375ea..e3f3aba 100644
--- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
+++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
@@ -892,6 +892,7 @@ DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
// have EXEC as implicit destination. Issue a warning if encoding for
// vdst is not EXEC.
if ((MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3) &&
+ MCII->get(MI.getOpcode()).getNumDefs() == 0 &&
MCII->get(MI.getOpcode()).hasImplicitDefOfPhysReg(AMDGPU::EXEC)) {
auto ExecEncoding = MRI.getEncodingValue(AMDGPU::EXEC_LO);
if (Bytes_[0] != ExecEncoding)
diff --git a/llvm/lib/Target/AMDGPU/FLATInstructions.td b/llvm/lib/Target/AMDGPU/FLATInstructions.td
index 6de59be..8ea64d1 100644
--- a/llvm/lib/Target/AMDGPU/FLATInstructions.td
+++ b/llvm/lib/Target/AMDGPU/FLATInstructions.td
@@ -3711,6 +3711,12 @@ defm GLOBAL_LOAD_TR_B64_w32 : VFLAT_Real_AllAddr_gfx1250<0x058, "globa
defm GLOBAL_LOAD_TR4_B64 : VFLAT_Real_AllAddr_gfx1250<0x073>;
defm GLOBAL_LOAD_TR6_B96 : VFLAT_Real_AllAddr_gfx1250<0x074>;
+// Additional aliases for global load transpose instructions.
+def : MnemonicAlias<"global_load_b128_tr_b16", "global_load_tr16_b128">, Requires<[isGFX125xOnly]>;
+def : MnemonicAlias<"global_load_b64_tr_b8", "global_load_tr8_b64">, Requires<[isGFX125xOnly]>;
+def : MnemonicAlias<"global_load_b64_tr_b4", "global_load_tr4_b64">, Requires<[isGFX125xOnly]>;
+def : MnemonicAlias<"global_load_b96_tr_b6", "global_load_tr6_b96">, Requires<[isGFX125xOnly]>;
+
defm FLAT_ATOMIC_ADD_F64 : VFLAT_Real_Atomics_gfx1250<0x055>;
defm FLAT_ATOMIC_MIN_F64 : VFLAT_Real_Atomics_gfx1250<0x05b, "flat_atomic_min_num_f64">;
defm FLAT_ATOMIC_MAX_F64 : VFLAT_Real_Atomics_gfx1250<0x05c, "flat_atomic_max_num_f64">;
diff --git a/llvm/lib/Target/AMDGPU/GCNRegPressure.h b/llvm/lib/Target/AMDGPU/GCNRegPressure.h
index 979a8b0..4b22c68 100644
--- a/llvm/lib/Target/AMDGPU/GCNRegPressure.h
+++ b/llvm/lib/Target/AMDGPU/GCNRegPressure.h
@@ -21,6 +21,7 @@
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include <algorithm>
+#include <array>
namespace llvm {
@@ -45,7 +46,7 @@ struct GCNRegPressure {
return !Value[SGPR] && !Value[VGPR] && !Value[AGPR] && !Value[AVGPR];
}
- void clear() { std::fill(&Value[0], &Value[ValueArraySize], 0); }
+ void clear() { Value.fill(0); }
unsigned getNumRegs(RegKind Kind) const {
assert(Kind < TOTAL_KINDS);
@@ -127,9 +128,7 @@ struct GCNRegPressure {
bool less(const MachineFunction &MF, const GCNRegPressure &O,
unsigned MaxOccupancy = std::numeric_limits<unsigned>::max()) const;
- bool operator==(const GCNRegPressure &O) const {
- return std::equal(&Value[0], &Value[ValueArraySize], O.Value);
- }
+ bool operator==(const GCNRegPressure &O) const { return Value == O.Value; }
bool operator!=(const GCNRegPressure &O) const {
return !(*this == O);
@@ -160,7 +159,7 @@ private:
/// Pressure for all register kinds (first all regular registers kinds, then
/// all tuple register kinds).
- unsigned Value[ValueArraySize];
+ std::array<unsigned, ValueArraySize> Value;
static unsigned getRegKind(const TargetRegisterClass *RC,
const SIRegisterInfo *STI);
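
The GCNRegPressure change above swaps a raw C array for std::array, so clear() and operator== reduce to one-liners. A self-contained sketch of the same idea, with a placeholder array size:

    #include <array>
    #include <cassert>

    constexpr unsigned ValueArraySize = 8; // placeholder; the real size differs

    struct Pressure {
      std::array<unsigned, ValueArraySize> Value{};

      void clear() { Value.fill(0); }
      bool operator==(const Pressure &O) const { return Value == O.Value; }
    };

    int main() {
      Pressure A, B;
      A.Value[3] = 42;
      assert(!(A == B));
      A.clear();
      assert(A == B);
      return 0;
    }
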
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index d516330..50447f4 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -9072,6 +9072,67 @@ void SIInstrInfo::movePackToVALU(SIInstrWorklist &Worklist,
MachineOperand &Src1 = Inst.getOperand(2);
const DebugLoc &DL = Inst.getDebugLoc();
+ if (ST.useRealTrue16Insts()) {
+ Register SrcReg0, SrcReg1;
+ if (!Src0.isReg() || !RI.isVGPR(MRI, Src0.getReg())) {
+ SrcReg0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+ BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), SrcReg0).add(Src0);
+ } else {
+ SrcReg0 = Src0.getReg();
+ }
+
+ if (!Src1.isReg() || !RI.isVGPR(MRI, Src1.getReg())) {
+ SrcReg1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+ BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), SrcReg1).add(Src1);
+ } else {
+ SrcReg1 = Src1.getReg();
+ }
+
+ bool isSrc0Reg16 = MRI.constrainRegClass(SrcReg0, &AMDGPU::VGPR_16RegClass);
+ bool isSrc1Reg16 = MRI.constrainRegClass(SrcReg1, &AMDGPU::VGPR_16RegClass);
+
+ auto NewMI = BuildMI(*MBB, Inst, DL, get(AMDGPU::REG_SEQUENCE), ResultReg);
+ switch (Inst.getOpcode()) {
+ case AMDGPU::S_PACK_LL_B32_B16:
+ NewMI
+ .addReg(SrcReg0, 0,
+ isSrc0Reg16 ? AMDGPU::NoSubRegister : AMDGPU::lo16)
+ .addImm(AMDGPU::lo16)
+ .addReg(SrcReg1, 0,
+ isSrc1Reg16 ? AMDGPU::NoSubRegister : AMDGPU::lo16)
+ .addImm(AMDGPU::hi16);
+ break;
+ case AMDGPU::S_PACK_LH_B32_B16:
+ NewMI
+ .addReg(SrcReg0, 0,
+ isSrc0Reg16 ? AMDGPU::NoSubRegister : AMDGPU::lo16)
+ .addImm(AMDGPU::lo16)
+ .addReg(SrcReg1, 0, AMDGPU::hi16)
+ .addImm(AMDGPU::hi16);
+ break;
+ case AMDGPU::S_PACK_HL_B32_B16:
+ NewMI.addReg(SrcReg0, 0, AMDGPU::hi16)
+ .addImm(AMDGPU::lo16)
+ .addReg(SrcReg1, 0,
+ isSrc1Reg16 ? AMDGPU::NoSubRegister : AMDGPU::lo16)
+ .addImm(AMDGPU::hi16);
+ break;
+ case AMDGPU::S_PACK_HH_B32_B16:
+ NewMI.addReg(SrcReg0, 0, AMDGPU::hi16)
+ .addImm(AMDGPU::lo16)
+ .addReg(SrcReg1, 0, AMDGPU::hi16)
+ .addImm(AMDGPU::hi16);
+ break;
+ default:
+ llvm_unreachable("unhandled s_pack_* instruction");
+ }
+
+ MachineOperand &Dest = Inst.getOperand(0);
+ MRI.replaceRegWith(Dest.getReg(), ResultReg);
+ addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
+ return;
+ }
+
switch (Inst.getOpcode()) {
case AMDGPU::S_PACK_LL_B32_B16: {
Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
diff --git a/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp b/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
index 01a40c1..7431e11 100644
--- a/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
@@ -47,9 +47,6 @@ private:
const MachineBasicBlock &From,
const MachineBasicBlock &To) const;
bool removeExeczBranch(MachineInstr &MI, MachineBasicBlock &SrcMBB);
- // Check if the machine instruction being processed is a supported packed
- // instruction.
- bool isUnpackingSupportedInstr(MachineInstr &MI) const;
// Creates a list of packed instructions following an MFMA that are suitable
// for unpacking.
void collectUnpackingCandidates(MachineInstr &BeginMI,
@@ -454,23 +451,6 @@ bool SIPreEmitPeephole::removeExeczBranch(MachineInstr &MI,
return true;
}
-// If support is extended to new operations, add tests in
-// llvm/test/CodeGen/AMDGPU/unpack-non-coissue-insts-post-ra-scheduler.mir.
-bool SIPreEmitPeephole::isUnpackingSupportedInstr(MachineInstr &MI) const {
- if (!TII->isNeverCoissue(MI))
- return false;
- unsigned Opcode = MI.getOpcode();
- switch (Opcode) {
- case AMDGPU::V_PK_ADD_F32:
- case AMDGPU::V_PK_MUL_F32:
- case AMDGPU::V_PK_FMA_F32:
- return true;
- default:
- return false;
- }
- llvm_unreachable("Fully covered switch");
-}
-
bool SIPreEmitPeephole::canUnpackingClobberRegister(const MachineInstr &MI) {
unsigned OpCode = MI.getOpcode();
Register DstReg = MI.getOperand(0).getReg();
@@ -612,10 +592,13 @@ void SIPreEmitPeephole::collectUnpackingCandidates(
for (auto I = std::next(BeginMI.getIterator()); I != E; ++I) {
MachineInstr &Instr = *I;
+ uint16_t UnpackedOpCode = mapToUnpackedOpcode(Instr);
+ bool IsUnpackable =
+ !(UnpackedOpCode == std::numeric_limits<uint16_t>::max());
if (Instr.isMetaInstruction())
continue;
if ((Instr.isTerminator()) ||
- (TII->isNeverCoissue(Instr) && !isUnpackingSupportedInstr(Instr)) ||
+ (TII->isNeverCoissue(Instr) && !IsUnpackable) ||
(SIInstrInfo::modifiesModeRegister(Instr) &&
Instr.modifiesRegister(AMDGPU::EXEC, TRI)))
return;
@@ -639,7 +622,7 @@ void SIPreEmitPeephole::collectUnpackingCandidates(
if (TRI->regsOverlap(MFMADef, InstrMO.getReg()))
return;
}
- if (!isUnpackingSupportedInstr(Instr))
+ if (!IsUnpackable)
continue;
if (canUnpackingClobberRegister(Instr))
@@ -687,8 +670,8 @@ MachineInstrBuilder SIPreEmitPeephole::createUnpackedMI(MachineInstr &I,
bool IsHiBits) {
MachineBasicBlock &MBB = *I.getParent();
const DebugLoc &DL = I.getDebugLoc();
- const MachineOperand *SrcMO1 = TII->getNamedOperand(I, AMDGPU::OpName::src0);
- const MachineOperand *SrcMO2 = TII->getNamedOperand(I, AMDGPU::OpName::src1);
+ const MachineOperand *SrcMO0 = TII->getNamedOperand(I, AMDGPU::OpName::src0);
+ const MachineOperand *SrcMO1 = TII->getNamedOperand(I, AMDGPU::OpName::src1);
Register DstReg = I.getOperand(0).getReg();
unsigned OpCode = I.getOpcode();
Register UnpackedDstReg = IsHiBits ? TRI->getSubReg(DstReg, AMDGPU::sub1)
@@ -702,15 +685,15 @@ MachineInstrBuilder SIPreEmitPeephole::createUnpackedMI(MachineInstr &I,
MachineInstrBuilder NewMI = BuildMI(MBB, I, DL, TII->get(UnpackedOpcode));
NewMI.addDef(UnpackedDstReg); // vdst
- addOperandAndMods(NewMI, Src0Mods, IsHiBits, *SrcMO1);
- addOperandAndMods(NewMI, Src1Mods, IsHiBits, *SrcMO2);
+ addOperandAndMods(NewMI, Src0Mods, IsHiBits, *SrcMO0);
+ addOperandAndMods(NewMI, Src1Mods, IsHiBits, *SrcMO1);
if (AMDGPU::hasNamedOperand(OpCode, AMDGPU::OpName::src2)) {
- const MachineOperand *SrcMO3 =
+ const MachineOperand *SrcMO2 =
TII->getNamedOperand(I, AMDGPU::OpName::src2);
unsigned Src2Mods =
TII->getNamedOperand(I, AMDGPU::OpName::src2_modifiers)->getImm();
- addOperandAndMods(NewMI, Src2Mods, IsHiBits, *SrcMO3);
+ addOperandAndMods(NewMI, Src2Mods, IsHiBits, *SrcMO2);
}
NewMI.addImm(ClampVal); // clamp
// Packed instructions do not support output modifiers. safe to assign them 0
@@ -787,9 +770,13 @@ bool SIPreEmitPeephole::run(MachineFunction &MF) {
// TODO: Fold this into previous block, if possible. Evaluate and handle any
// side effects.
+
+ // Perform the extra MF scans only for supported archs
+ if (!ST.hasGFX940Insts())
+ return Changed;
for (MachineBasicBlock &MBB : MF) {
- // Unpack packed instructions overlapped by MFMAs. This allows the compiler
- // to co-issue unpacked instructions with MFMA
+ // Unpack packed instructions overlapped by MFMAs. This allows the
+ // compiler to co-issue unpacked instructions with MFMA
auto SchedModel = TII->getSchedModel();
SetVector<MachineInstr *> InstrsToUnpack;
for (auto &MI : make_early_inc_range(MBB.instrs())) {
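
The SIPreEmitPeephole change above drops the separate isUnpackingSupportedInstr predicate and instead derives "is this instruction unpackable?" from mapToUnpackedOpcode returning a uint16_t max sentinel. A minimal sketch of that sentinel convention, with made-up opcode values:

    #include <cstdint>
    #include <limits>

    // Hypothetical packed opcode values; only the sentinel convention matters.
    constexpr uint16_t V_PK_ADD = 1, V_PK_MUL = 2;

    uint16_t mapToUnpackedOpcode(uint16_t Packed) {
      switch (Packed) {
      case V_PK_ADD:
        return 101; // unpacked add opcode (made up)
      case V_PK_MUL:
        return 102; // unpacked mul opcode (made up)
      default:
        return std::numeric_limits<uint16_t>::max(); // sentinel: not unpackable
      }
    }

    bool isUnpackable(uint16_t Packed) {
      return mapToUnpackedOpcode(Packed) != std::numeric_limits<uint16_t>::max();
    }
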
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 67ea2dd..35e1127 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -21287,21 +21287,28 @@ bool ARMTargetLowering::useLoadStackGuardNode(const Module &M) const {
}
void ARMTargetLowering::insertSSPDeclarations(Module &M) const {
+ // MSVC CRT provides functionalities for stack protection.
RTLIB::LibcallImpl SecurityCheckCookieLibcall =
getLibcallImpl(RTLIB::SECURITY_CHECK_COOKIE);
- if (SecurityCheckCookieLibcall == RTLIB::Unsupported)
- return TargetLowering::insertSSPDeclarations(M);
- // MSVC CRT has a global variable holding security cookie.
- M.getOrInsertGlobal("__security_cookie",
- PointerType::getUnqual(M.getContext()));
+ RTLIB::LibcallImpl SecurityCookieVar =
+ getLibcallImpl(RTLIB::STACK_CHECK_GUARD);
+ if (SecurityCheckCookieLibcall != RTLIB::Unsupported &&
+ SecurityCookieVar != RTLIB::Unsupported) {
+ // MSVC CRT has a global variable holding security cookie.
+ M.getOrInsertGlobal(getLibcallImplName(SecurityCookieVar),
+ PointerType::getUnqual(M.getContext()));
- // MSVC CRT has a function to validate security cookie.
- FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
- getLibcallImplName(SecurityCheckCookieLibcall),
- Type::getVoidTy(M.getContext()), PointerType::getUnqual(M.getContext()));
- if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee()))
- F->addParamAttr(0, Attribute::AttrKind::InReg);
+ // MSVC CRT has a function to validate security cookie.
+ FunctionCallee SecurityCheckCookie =
+ M.getOrInsertFunction(getLibcallImplName(SecurityCheckCookieLibcall),
+ Type::getVoidTy(M.getContext()),
+ PointerType::getUnqual(M.getContext()));
+ if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee()))
+ F->addParamAttr(0, Attribute::AttrKind::InReg);
+ }
+
+ TargetLowering::insertSSPDeclarations(M);
}
Function *ARMTargetLowering::getSSPStackGuardCheck(const Module &M) const {
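
The ARM insertSSPDeclarations change above turns an either/or early return into a conditional MSVC block followed by an unconditional call into the generic TargetLowering path. A simplified, hypothetical sketch of that control flow (the types and output here are placeholders, not the RTLIB API):

    #include <iostream>

    enum class Libcall { Supported, Unsupported };

    void insertGenericSSPDeclarations() { std::cout << "generic SSP decls\n"; }

    // MSVC-specific declarations are emitted only when both libcalls are
    // available; the generic path always runs afterwards.
    void insertSSPDeclarations(Libcall CheckCookie, Libcall CookieVar) {
      if (CheckCookie == Libcall::Supported && CookieVar == Libcall::Supported)
        std::cout << "declare __security_cookie and __security_check_cookie\n";
      insertGenericSSPDeclarations();
    }

    int main() {
      insertSSPDeclarations(Libcall::Supported, Libcall::Unsupported);
      return 0;
    }
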
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index 5ceb477..19992e6 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -695,6 +695,9 @@ def HasStdExtZvfbfa : Predicate<"Subtarget->hasStdExtZvfbfa()">,
def FeatureStdExtZvfbfmin
: RISCVExtension<1, 0, "Vector BF16 Converts", [FeatureStdExtZve32f]>;
+def HasStdExtZvfbfmin : Predicate<"Subtarget->hasStdExtZvfbfmin()">,
+ AssemblerPredicate<(all_of FeatureStdExtZvfbfmin),
+ "'Zvfbfmin' (Vector BF16 Converts)">;
def FeatureStdExtZvfbfwma
: RISCVExtension<1, 0, "Vector BF16 widening mul-add",
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index eb87558..169465e 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -24830,7 +24830,8 @@ bool RISCVTargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
// instruction, as it is usually smaller than the alternative sequence.
// TODO: Add vector division?
bool OptSize = Attr.hasFnAttr(Attribute::MinSize);
- return OptSize && !VT.isVector();
+ return OptSize && !VT.isVector() &&
+ VT.getSizeInBits() <= getMaxDivRemBitWidthSupported();
}
bool RISCVTargetLowering::preferScalarizeSplat(SDNode *N) const {
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 1b7cb9b..636e31c 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -699,7 +699,8 @@ public:
"Can't encode VTYPE for uninitialized or unknown");
if (TWiden != 0)
return RISCVVType::encodeXSfmmVType(SEW, TWiden, AltFmt);
- return RISCVVType::encodeVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic);
+ return RISCVVType::encodeVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic,
+ AltFmt);
}
bool hasSEWLMULRatioOnly() const { return SEWLMULRatioOnly; }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index ddb53a2..12f776b 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -3775,11 +3775,13 @@ std::string RISCVInstrInfo::createMIROperandComment(
#define CASE_VFMA_OPCODE_VV(OP) \
CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16): \
+ case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VV, E16): \
case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32): \
case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64)
#define CASE_VFMA_SPLATS(OP) \
CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16): \
+ case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VFPR16, E16): \
case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32): \
case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64)
// clang-format on
@@ -4003,11 +4005,13 @@ bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP) \
CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16) \
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VV, E16) \
CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32) \
CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64)
#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16) \
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VFPR16, E16) \
CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32) \
CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64)
// clang-format on
@@ -4469,6 +4473,20 @@ bool RISCVInstrInfo::simplifyInstruction(MachineInstr &MI) const {
CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32) \
CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16) \
CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32) \
+
+#define CASE_FP_WIDEOP_OPCODE_LMULS_ALT(OP) \
+ CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
+ case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
+ case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
+ case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
+ case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16)
+
+#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(OP) \
+ CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
+ CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
+ CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
+ CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
+ CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16)
// clang-format on
MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI,
@@ -4478,6 +4496,8 @@ MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI,
switch (MI.getOpcode()) {
default:
return nullptr;
+ case CASE_FP_WIDEOP_OPCODE_LMULS_ALT(FWADD_ALT_WV):
+ case CASE_FP_WIDEOP_OPCODE_LMULS_ALT(FWSUB_ALT_WV):
case CASE_FP_WIDEOP_OPCODE_LMULS(FWADD_WV):
case CASE_FP_WIDEOP_OPCODE_LMULS(FWSUB_WV): {
assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
@@ -4494,6 +4514,8 @@ MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI,
llvm_unreachable("Unexpected opcode");
CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(FWADD_WV)
CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(FWSUB_WV)
+ CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(FWADD_ALT_WV)
+ CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(FWSUB_ALT_WV)
}
// clang-format on
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 65865ce..eb3c9b0 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -5862,20 +5862,6 @@ multiclass VPatConversionWF_VF<string intrinsic, string instruction,
}
}
-multiclass VPatConversionWF_VF_BF<string intrinsic, string instruction,
- bit isSEWAware = 0> {
- foreach fvtiToFWti = AllWidenableBF16ToFloatVectors in
- {
- defvar fvti = fvtiToFWti.Vti;
- defvar fwti = fvtiToFWti.Wti;
- let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
- GetVTypePredicates<fwti>.Predicates) in
- defm : VPatConversion<intrinsic, instruction, "V",
- fwti.Vector, fvti.Vector, fwti.Mask, fvti.Log2SEW,
- fvti.LMul, fwti.RegClass, fvti.RegClass, isSEWAware>;
- }
-}
-
multiclass VPatConversionVI_WF<string intrinsic, string instruction> {
foreach vtiToWti = AllWidenableIntToFloatVectors in {
defvar vti = vtiToWti.Vti;
@@ -5969,20 +5955,6 @@ multiclass VPatConversionVF_WF_RTZ<string intrinsic, string instruction,
}
}
-multiclass VPatConversionVF_WF_BF_RM<string intrinsic, string instruction,
- bit isSEWAware = 0> {
- foreach fvtiToFWti = AllWidenableBF16ToFloatVectors in {
- defvar fvti = fvtiToFWti.Vti;
- defvar fwti = fvtiToFWti.Wti;
- let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
- GetVTypePredicates<fwti>.Predicates) in
- defm : VPatConversionRoundingMode<intrinsic, instruction, "W",
- fvti.Vector, fwti.Vector, fvti.Mask, fvti.Log2SEW,
- fvti.LMul, fvti.RegClass, fwti.RegClass,
- isSEWAware>;
- }
-}
-
multiclass VPatCompare_VI<string intrinsic, string inst,
ImmLeaf ImmType> {
foreach vti = AllIntegerVectors in {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td
index 0be9eab..9358486 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td
@@ -36,7 +36,7 @@ defm VFWMACCBF16_V : VWMAC_FV_V_F<"vfwmaccbf16", 0b111011>;
//===----------------------------------------------------------------------===//
// Pseudo instructions
//===----------------------------------------------------------------------===//
-let Predicates = [HasStdExtZvfbfminOrZvfofp8min] in {
+let Predicates = [HasStdExtZvfbfmin] in {
defm PseudoVFWCVTBF16_F_F : VPseudoVWCVTD_V;
defm PseudoVFNCVTBF16_F_F : VPseudoVNCVTD_W_RM;
}
@@ -44,10 +44,364 @@ let Predicates = [HasStdExtZvfbfminOrZvfofp8min] in {
let mayRaiseFPException = true, Predicates = [HasStdExtZvfbfwma] in
defm PseudoVFWMACCBF16 : VPseudoVWMAC_VV_VF_BF_RM;
+defset list<VTypeInfoToWide> AllWidenableIntToBF16Vectors = {
+ def : VTypeInfoToWide<VI8MF8, VBF16MF4>;
+ def : VTypeInfoToWide<VI8MF4, VBF16MF2>;
+ def : VTypeInfoToWide<VI8MF2, VBF16M1>;
+ def : VTypeInfoToWide<VI8M1, VBF16M2>;
+ def : VTypeInfoToWide<VI8M2, VBF16M4>;
+ def : VTypeInfoToWide<VI8M4, VBF16M8>;
+}
+
+multiclass VPseudoVALU_VV_VF_RM_BF16 {
+ foreach m = MxListF in {
+ defm "" : VPseudoBinaryFV_VV_RM<m, 16/*sew*/>,
+ SchedBinary<"WriteVFALUV", "ReadVFALUV", "ReadVFALUV", m.MX,
+ 16/*sew*/, forcePassthruRead=true>;
+ }
+
+ defvar f = SCALAR_F16;
+ foreach m = f.MxList in {
+ defm "" : VPseudoBinaryV_VF_RM<m, f, f.SEW>,
+ SchedBinary<"WriteVFALUF", "ReadVFALUV", "ReadVFALUF", m.MX,
+ f.SEW, forcePassthruRead=true>;
+ }
+}
+
+multiclass VPseudoVALU_VF_RM_BF16 {
+ defvar f = SCALAR_F16;
+ foreach m = f.MxList in {
+ defm "" : VPseudoBinaryV_VF_RM<m, f, f.SEW>,
+ SchedBinary<"WriteVFALUF", "ReadVFALUV", "ReadVFALUF", m.MX,
+ f.SEW, forcePassthruRead=true>;
+ }
+}
+
+multiclass VPseudoVFWALU_VV_VF_RM_BF16 {
+ foreach m = MxListFW in {
+ defm "" : VPseudoBinaryW_VV_RM<m, sew=16>,
+ SchedBinary<"WriteVFWALUV", "ReadVFWALUV", "ReadVFWALUV", m.MX,
+ 16/*sew*/, forcePassthruRead=true>;
+ }
+
+ defvar f = SCALAR_F16;
+ foreach m = f.MxListFW in {
+ defm "" : VPseudoBinaryW_VF_RM<m, f, sew=f.SEW>,
+ SchedBinary<"WriteVFWALUF", "ReadVFWALUV", "ReadVFWALUF", m.MX,
+ f.SEW, forcePassthruRead=true>;
+ }
+}
+
+multiclass VPseudoVFWALU_WV_WF_RM_BF16 {
+ foreach m = MxListFW in {
+ defm "" : VPseudoBinaryW_WV_RM<m, sew=16>,
+ SchedBinary<"WriteVFWALUV", "ReadVFWALUV", "ReadVFWALUV", m.MX,
+ 16/*sew*/, forcePassthruRead=true>;
+ }
+ defvar f = SCALAR_F16;
+ foreach m = f.MxListFW in {
+ defm "" : VPseudoBinaryW_WF_RM<m, f, sew=f.SEW>,
+ SchedBinary<"WriteVFWALUF", "ReadVFWALUV", "ReadVFWALUF", m.MX,
+ f.SEW, forcePassthruRead=true>;
+ }
+}
+
+multiclass VPseudoVFMUL_VV_VF_RM_BF16 {
+ foreach m = MxListF in {
+ defm "" : VPseudoBinaryFV_VV_RM<m, 16/*sew*/>,
+ SchedBinary<"WriteVFMulV", "ReadVFMulV", "ReadVFMulV", m.MX,
+ 16/*sew*/, forcePassthruRead=true>;
+ }
+
+ defvar f = SCALAR_F16;
+ foreach m = f.MxList in {
+ defm "" : VPseudoBinaryV_VF_RM<m, f, f.SEW>,
+ SchedBinary<"WriteVFMulF", "ReadVFMulV", "ReadVFMulF", m.MX,
+ f.SEW, forcePassthruRead=true>;
+ }
+}
+
+multiclass VPseudoVWMUL_VV_VF_RM_BF16 {
+ foreach m = MxListFW in {
+ defm "" : VPseudoBinaryW_VV_RM<m, sew=16>,
+ SchedBinary<"WriteVFWMulV", "ReadVFWMulV", "ReadVFWMulV", m.MX,
+ 16/*sew*/, forcePassthruRead=true>;
+ }
+
+ defvar f = SCALAR_F16;
+ foreach m = f.MxListFW in {
+ defm "" : VPseudoBinaryW_VF_RM<m, f, sew=f.SEW>,
+ SchedBinary<"WriteVFWMulF", "ReadVFWMulV", "ReadVFWMulF", m.MX,
+ f.SEW, forcePassthruRead=true>;
+ }
+}
+
+multiclass VPseudoVMAC_VV_VF_AAXA_RM_BF16 {
+ foreach m = MxListF in {
+ defm "" : VPseudoTernaryV_VV_AAXA_RM<m, 16/*sew*/>,
+ SchedTernary<"WriteVFMulAddV", "ReadVFMulAddV", "ReadVFMulAddV",
+ "ReadVFMulAddV", m.MX, 16/*sew*/>;
+ }
+
+ defvar f = SCALAR_F16;
+ foreach m = f.MxList in {
+ defm "" : VPseudoTernaryV_VF_AAXA_RM<m, f, f.SEW>,
+ SchedTernary<"WriteVFMulAddF", "ReadVFMulAddV", "ReadVFMulAddF",
+ "ReadVFMulAddV", m.MX, f.SEW>;
+ }
+}
+
+multiclass VPseudoVWMAC_VV_VF_RM_BF16 {
+ foreach m = MxListFW in {
+ defm "" : VPseudoTernaryW_VV_RM<m, sew=16>,
+ SchedTernary<"WriteVFWMulAddV", "ReadVFWMulAddV",
+ "ReadVFWMulAddV", "ReadVFWMulAddV", m.MX, 16/*sew*/>;
+ }
+
+ defvar f = SCALAR_F16;
+ foreach m = f.MxListFW in {
+ defm "" : VPseudoTernaryW_VF_RM<m, f, sew=f.SEW>,
+ SchedTernary<"WriteVFWMulAddF", "ReadVFWMulAddV",
+ "ReadVFWMulAddF", "ReadVFWMulAddV", m.MX, f.SEW>;
+ }
+}
+
+multiclass VPseudoVRCP_V_BF16 {
+ foreach m = MxListF in {
+ defvar mx = m.MX;
+ let VLMul = m.value in {
+ def "_V_" # mx # "_E16"
+ : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
+ SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, 16/*sew*/,
+ forcePassthruRead=true>;
+ def "_V_" # mx # "_E16_MASK"
+ : VPseudoUnaryMask<m.vrclass, m.vrclass>,
+ RISCVMaskedPseudo<MaskIdx = 2>,
+ SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, 16/*sew*/,
+ forcePassthruRead=true>;
+ }
+ }
+}
+
+multiclass VPseudoVRCP_V_RM_BF16 {
+ foreach m = MxListF in {
+ defvar mx = m.MX;
+ let VLMul = m.value in {
+ def "_V_" # mx # "_E16"
+ : VPseudoUnaryNoMaskRoundingMode<m.vrclass, m.vrclass>,
+ SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, 16/*sew*/,
+ forcePassthruRead=true>;
+ def "_V_" # mx # "_E16_MASK"
+ : VPseudoUnaryMaskRoundingMode<m.vrclass, m.vrclass>,
+ RISCVMaskedPseudo<MaskIdx = 2>,
+ SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, 16/*sew*/,
+ forcePassthruRead=true>;
+ }
+ }
+}
+
+multiclass VPseudoVMAX_VV_VF_BF16 {
+ foreach m = MxListF in {
+ defm "" : VPseudoBinaryV_VV<m, sew=16>,
+ SchedBinary<"WriteVFMinMaxV", "ReadVFMinMaxV", "ReadVFMinMaxV",
+ m.MX, 16/*sew*/, forcePassthruRead=true>;
+ }
+
+ defvar f = SCALAR_F16;
+ foreach m = f.MxList in {
+ defm "" : VPseudoBinaryV_VF<m, f, f.SEW>,
+ SchedBinary<"WriteVFMinMaxF", "ReadVFMinMaxV", "ReadVFMinMaxF",
+ m.MX, f.SEW, forcePassthruRead=true>;
+ }
+}
+
+multiclass VPseudoVSGNJ_VV_VF_BF16 {
+ foreach m = MxListF in {
+ defm "" : VPseudoBinaryV_VV<m, sew=16>,
+ SchedBinary<"WriteVFSgnjV", "ReadVFSgnjV", "ReadVFSgnjV", m.MX,
+ 16/*sew*/, forcePassthruRead=true>;
+ }
+
+ defvar f = SCALAR_F16;
+ foreach m = f.MxList in {
+ defm "" : VPseudoBinaryV_VF<m, f, f.SEW>,
+ SchedBinary<"WriteVFSgnjF", "ReadVFSgnjV", "ReadVFSgnjF", m.MX,
+ f.SEW, forcePassthruRead=true>;
+ }
+}
+
+multiclass VPseudoVWCVTF_V_BF16 {
+ defvar constraint = "@earlyclobber $rd";
+ foreach m = MxListW in
+ defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint, sew=8,
+ TargetConstraintType=3>,
+ SchedUnary<"WriteVFWCvtIToFV", "ReadVFWCvtIToFV", m.MX, 8/*sew*/,
+ forcePassthruRead=true>;
+}
+
+multiclass VPseudoVWCVTD_V_BF16 {
+ defvar constraint = "@earlyclobber $rd";
+ foreach m = MxListFW in
+ defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint, sew=16,
+ TargetConstraintType=3>,
+ SchedUnary<"WriteVFWCvtFToFV", "ReadVFWCvtFToFV", m.MX, 16/*sew*/,
+ forcePassthruRead=true>;
+}
+
+multiclass VPseudoVNCVTD_W_BF16 {
+ defvar constraint = "@earlyclobber $rd";
+ foreach m = MxListFW in
+ defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, constraint, sew=16,
+ TargetConstraintType=2>,
+ SchedUnary<"WriteVFNCvtFToFV", "ReadVFNCvtFToFV", m.MX, 16/*sew*/,
+ forcePassthruRead=true>;
+}
+
+multiclass VPseudoVNCVTD_W_RM_BF16 {
+ defvar constraint = "@earlyclobber $rd";
+ foreach m = MxListFW in
+ defm _W : VPseudoConversionRoundingMode<m.vrclass, m.wvrclass, m,
+ constraint, sew=16,
+ TargetConstraintType=2>,
+ SchedUnary<"WriteVFNCvtFToFV", "ReadVFNCvtFToFV", m.MX, 16/*sew*/,
+ forcePassthruRead=true>;
+}
+
+let Predicates = [HasStdExtZvfbfa], AltFmtType = IS_ALTFMT in {
+let mayRaiseFPException = true in {
+defm PseudoVFADD_ALT : VPseudoVALU_VV_VF_RM_BF16;
+defm PseudoVFSUB_ALT : VPseudoVALU_VV_VF_RM_BF16;
+defm PseudoVFRSUB_ALT : VPseudoVALU_VF_RM_BF16;
+}
+
+let mayRaiseFPException = true in {
+defm PseudoVFWADD_ALT : VPseudoVFWALU_VV_VF_RM_BF16;
+defm PseudoVFWSUB_ALT : VPseudoVFWALU_VV_VF_RM_BF16;
+defm PseudoVFWADD_ALT : VPseudoVFWALU_WV_WF_RM_BF16;
+defm PseudoVFWSUB_ALT : VPseudoVFWALU_WV_WF_RM_BF16;
+}
+
+let mayRaiseFPException = true in
+defm PseudoVFMUL_ALT : VPseudoVFMUL_VV_VF_RM_BF16;
+
+let mayRaiseFPException = true in
+defm PseudoVFWMUL_ALT : VPseudoVWMUL_VV_VF_RM_BF16;
+
+let mayRaiseFPException = true in {
+defm PseudoVFMACC_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16;
+defm PseudoVFNMACC_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16;
+defm PseudoVFMSAC_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16;
+defm PseudoVFNMSAC_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16;
+defm PseudoVFMADD_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16;
+defm PseudoVFNMADD_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16;
+defm PseudoVFMSUB_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16;
+defm PseudoVFNMSUB_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16;
+}
+
+let mayRaiseFPException = true in {
+defm PseudoVFWMACC_ALT : VPseudoVWMAC_VV_VF_RM_BF16;
+defm PseudoVFWNMACC_ALT : VPseudoVWMAC_VV_VF_RM_BF16;
+defm PseudoVFWMSAC_ALT : VPseudoVWMAC_VV_VF_RM_BF16;
+defm PseudoVFWNMSAC_ALT : VPseudoVWMAC_VV_VF_RM_BF16;
+}
+
+let mayRaiseFPException = true in
+defm PseudoVFRSQRT7_ALT : VPseudoVRCP_V_BF16;
+
+let mayRaiseFPException = true in
+defm PseudoVFREC7_ALT : VPseudoVRCP_V_RM_BF16;
+
+let mayRaiseFPException = true in {
+defm PseudoVFMIN_ALT : VPseudoVMAX_VV_VF_BF16;
+defm PseudoVFMAX_ALT : VPseudoVMAX_VV_VF_BF16;
+}
+
+defm PseudoVFSGNJ_ALT : VPseudoVSGNJ_VV_VF_BF16;
+defm PseudoVFSGNJN_ALT : VPseudoVSGNJ_VV_VF_BF16;
+defm PseudoVFSGNJX_ALT : VPseudoVSGNJ_VV_VF_BF16;
+
+let mayRaiseFPException = true in {
+defm PseudoVMFEQ_ALT : VPseudoVCMPM_VV_VF;
+defm PseudoVMFNE_ALT : VPseudoVCMPM_VV_VF;
+defm PseudoVMFLT_ALT : VPseudoVCMPM_VV_VF;
+defm PseudoVMFLE_ALT : VPseudoVCMPM_VV_VF;
+defm PseudoVMFGT_ALT : VPseudoVCMPM_VF;
+defm PseudoVMFGE_ALT : VPseudoVCMPM_VF;
+}
+
+defm PseudoVFCLASS_ALT : VPseudoVCLS_V;
+
+defm PseudoVFMERGE_ALT : VPseudoVMRG_FM;
+
+defm PseudoVFMV_V_ALT : VPseudoVMV_F;
+
+let mayRaiseFPException = true in {
+defm PseudoVFWCVT_F_XU_ALT : VPseudoVWCVTF_V_BF16;
+defm PseudoVFWCVT_F_X_ALT : VPseudoVWCVTF_V_BF16;
+
+defm PseudoVFWCVT_F_F_ALT : VPseudoVWCVTD_V_BF16;
+} // mayRaiseFPException = true
+
+let mayRaiseFPException = true in {
+let hasSideEffects = 0, hasPostISelHook = 1 in {
+defm PseudoVFNCVT_XU_F_ALT : VPseudoVNCVTI_W_RM;
+defm PseudoVFNCVT_X_F_ALT : VPseudoVNCVTI_W_RM;
+}
+
+defm PseudoVFNCVT_RTZ_XU_F_ALT : VPseudoVNCVTI_W;
+defm PseudoVFNCVT_RTZ_X_F_ALT : VPseudoVNCVTI_W;
+
+defm PseudoVFNCVT_F_F_ALT : VPseudoVNCVTD_W_RM_BF16;
+
+defm PseudoVFNCVT_ROD_F_F_ALT : VPseudoVNCVTD_W_BF16;
+} // mayRaiseFPException = true
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
+ defvar f = SCALAR_F16;
+ let HasSEWOp = 1, BaseInstr = VFMV_F_S in
+ def "PseudoVFMV_" # f.FX # "_S_ALT" :
+ RISCVVPseudo<(outs f.fprclass:$rd), (ins VR:$rs2, sew:$sew)>,
+ Sched<[WriteVMovFS, ReadVMovFS]>;
+ let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VFMV_S_F, isReMaterializable = 1,
+ Constraints = "$rd = $passthru" in
+ def "PseudoVFMV_S_" # f.FX # "_ALT" :
+ RISCVVPseudo<(outs VR:$rd),
+ (ins VR:$passthru, f.fprclass:$rs1, AVL:$vl, sew:$sew)>,
+ Sched<[WriteVMovSF, ReadVMovSF_V, ReadVMovSF_F]>;
+}
+
+defm PseudoVFSLIDE1UP_ALT : VPseudoVSLD1_VF<"@earlyclobber $rd">;
+defm PseudoVFSLIDE1DOWN_ALT : VPseudoVSLD1_VF;
+} // Predicates = [HasStdExtZvfbfa], AltFmtType = IS_ALTFMT
+
//===----------------------------------------------------------------------===//
// Patterns
//===----------------------------------------------------------------------===//
-let Predicates = [HasStdExtZvfbfminOrZvfofp8min] in {
+multiclass VPatConversionWF_VF_BF<string intrinsic, string instruction,
+ bit isSEWAware = 0> {
+ foreach fvtiToFWti = AllWidenableBF16ToFloatVectors in
+ {
+ defvar fvti = fvtiToFWti.Vti;
+ defvar fwti = fvtiToFWti.Wti;
+ defm : VPatConversion<intrinsic, instruction, "V",
+ fwti.Vector, fvti.Vector, fwti.Mask, fvti.Log2SEW,
+ fvti.LMul, fwti.RegClass, fvti.RegClass, isSEWAware>;
+ }
+}
+
+multiclass VPatConversionVF_WF_BF_RM<string intrinsic, string instruction,
+ bit isSEWAware = 0> {
+ foreach fvtiToFWti = AllWidenableBF16ToFloatVectors in {
+ defvar fvti = fvtiToFWti.Vti;
+ defvar fwti = fvtiToFWti.Wti;
+ defm : VPatConversionRoundingMode<intrinsic, instruction, "W",
+ fvti.Vector, fwti.Vector, fvti.Mask, fvti.Log2SEW,
+ fvti.LMul, fvti.RegClass, fwti.RegClass,
+ isSEWAware>;
+ }
+}
+
+let Predicates = [HasStdExtZvfbfmin] in {
defm : VPatConversionWF_VF_BF<"int_riscv_vfwcvtbf16_f_f_v",
"PseudoVFWCVTBF16_F_F", isSEWAware=1>;
defm : VPatConversionVF_WF_BF_RM<"int_riscv_vfncvtbf16_f_f_w",
@@ -56,7 +410,6 @@ let Predicates = [HasStdExtZvfbfminOrZvfofp8min] in {
foreach fvtiToFWti = AllWidenableBF16ToFloatVectors in {
defvar fvti = fvtiToFWti.Vti;
defvar fwti = fvtiToFWti.Wti;
- let Predicates = [HasVInstructionsBF16Minimal] in
def : Pat<(fwti.Vector (any_riscv_fpextend_vl
(fvti.Vector fvti.RegClass:$rs1),
(fvti.Mask VMV0:$vm),
@@ -66,18 +419,16 @@ let Predicates = [HasStdExtZvfbfminOrZvfofp8min] in {
(fvti.Mask VMV0:$vm),
GPR:$vl, fvti.Log2SEW, TA_MA)>;
- let Predicates = [HasVInstructionsBF16Minimal] in
- def : Pat<(fvti.Vector (any_riscv_fpround_vl
- (fwti.Vector fwti.RegClass:$rs1),
- (fwti.Mask VMV0:$vm), VLOpFrag)),
- (!cast<Instruction>("PseudoVFNCVTBF16_F_F_W_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
- (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
- (fwti.Mask VMV0:$vm),
- // Value to indicate no rounding mode change in
- // RISCVInsertReadWriteCSR
- FRM_DYN,
- GPR:$vl, fvti.Log2SEW, TA_MA)>;
- let Predicates = [HasVInstructionsBF16Minimal] in
+ def : Pat<(fvti.Vector (any_riscv_fpround_vl
+ (fwti.Vector fwti.RegClass:$rs1),
+ (fwti.Mask VMV0:$vm), VLOpFrag)),
+ (!cast<Instruction>("PseudoVFNCVTBF16_F_F_W_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
+ (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
+ (fwti.Mask VMV0:$vm),
+ // Value to indicate no rounding mode change in
+ // RISCVInsertReadWriteCSR
+ FRM_DYN,
+ GPR:$vl, fvti.Log2SEW, TA_MA)>;
def : Pat<(fvti.Vector (fpround (fwti.Vector fwti.RegClass:$rs1))),
(!cast<Instruction>("PseudoVFNCVTBF16_F_F_W_"#fvti.LMul.MX#"_E"#fvti.SEW)
(fvti.Vector (IMPLICIT_DEF)),
@@ -87,6 +438,130 @@ let Predicates = [HasStdExtZvfbfminOrZvfofp8min] in {
FRM_DYN,
fvti.AVL, fvti.Log2SEW, TA_MA)>;
}
+
+ defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllBF16Vectors>;
+ defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
+ AllBF16Vectors, uimm5>;
+ defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
+ eew=16, vtilist=AllBF16Vectors>;
+ defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllBF16Vectors, uimm5>;
+ defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllBF16Vectors, uimm5>;
+
+ foreach fvti = AllBF16Vectors in {
+ defm : VPatBinaryCarryInTAIL<"int_riscv_vmerge", "PseudoVMERGE", "VVM",
+ fvti.Vector,
+ fvti.Vector, fvti.Vector, fvti.Mask,
+ fvti.Log2SEW, fvti.LMul, fvti.RegClass,
+ fvti.RegClass, fvti.RegClass>;
+ defm : VPatBinaryCarryInTAIL<"int_riscv_vfmerge", "PseudoVFMERGE",
+ "V"#fvti.ScalarSuffix#"M",
+ fvti.Vector,
+ fvti.Vector, fvti.Scalar, fvti.Mask,
+ fvti.Log2SEW, fvti.LMul, fvti.RegClass,
+ fvti.RegClass, fvti.ScalarRegClass>;
+ defvar instr = !cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX);
+ def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$passthru),
+ (fvti.Vector fvti.RegClass:$rs2),
+ (fvti.Scalar (fpimm0)),
+ (fvti.Mask VMV0:$vm), VLOpFrag)),
+ (instr fvti.RegClass:$passthru, fvti.RegClass:$rs2, 0,
+ (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW)>;
+
+ defvar ivti = GetIntVTypeInfo<fvti>.Vti;
+ def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm), fvti.RegClass:$rs1,
+ fvti.RegClass:$rs2)),
+ (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
+ (fvti.Vector (IMPLICIT_DEF)),
+ fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask VMV0:$vm),
+ fvti.AVL, fvti.Log2SEW)>;
+
+ def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm),
+ (SplatFPOp (SelectScalarFPAsInt (XLenVT GPR:$imm))),
+ fvti.RegClass:$rs2)),
+ (!cast<Instruction>("PseudoVMERGE_VXM_"#fvti.LMul.MX)
+ (fvti.Vector (IMPLICIT_DEF)),
+ fvti.RegClass:$rs2, GPR:$imm, (fvti.Mask VMV0:$vm), fvti.AVL, fvti.Log2SEW)>;
+
+ def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm),
+ (SplatFPOp (fvti.Scalar fpimm0)),
+ fvti.RegClass:$rs2)),
+ (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
+ (fvti.Vector (IMPLICIT_DEF)),
+ fvti.RegClass:$rs2, 0, (fvti.Mask VMV0:$vm), fvti.AVL, fvti.Log2SEW)>;
+
+ def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm),
+ (SplatFPOp fvti.ScalarRegClass:$rs1),
+ fvti.RegClass:$rs2)),
+ (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
+ (fvti.Vector (IMPLICIT_DEF)),
+ fvti.RegClass:$rs2,
+ (fvti.Scalar fvti.ScalarRegClass:$rs1),
+ (fvti.Mask VMV0:$vm), fvti.AVL, fvti.Log2SEW)>;
+
+ def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm),
+ fvti.RegClass:$rs1,
+ fvti.RegClass:$rs2,
+ fvti.RegClass:$passthru,
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
+ fvti.RegClass:$passthru, fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask VMV0:$vm),
+ GPR:$vl, fvti.Log2SEW)>;
+
+ def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm),
+ (SplatFPOp (SelectScalarFPAsInt (XLenVT GPR:$imm))),
+ fvti.RegClass:$rs2,
+ fvti.RegClass:$passthru,
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVMERGE_VXM_"#fvti.LMul.MX)
+ fvti.RegClass:$passthru, fvti.RegClass:$rs2, GPR:$imm, (fvti.Mask VMV0:$vm),
+ GPR:$vl, fvti.Log2SEW)>;
+
+ def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm),
+ (SplatFPOp (fvti.Scalar fpimm0)),
+ fvti.RegClass:$rs2,
+ fvti.RegClass:$passthru,
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
+ fvti.RegClass:$passthru, fvti.RegClass:$rs2, 0, (fvti.Mask VMV0:$vm),
+ GPR:$vl, fvti.Log2SEW)>;
+
+ def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm),
+ (SplatFPOp fvti.ScalarRegClass:$rs1),
+ fvti.RegClass:$rs2,
+ fvti.RegClass:$passthru,
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
+ fvti.RegClass:$passthru, fvti.RegClass:$rs2,
+ (fvti.Scalar fvti.ScalarRegClass:$rs1),
+ (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW)>;
+
+ def : Pat<(fvti.Vector
+ (riscv_vrgather_vv_vl fvti.RegClass:$rs2,
+ (ivti.Vector fvti.RegClass:$rs1),
+ fvti.RegClass:$passthru,
+ (fvti.Mask VMV0:$vm),
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVRGATHER_VV_"# fvti.LMul.MX#"_E"# fvti.SEW#"_MASK")
+ fvti.RegClass:$passthru, fvti.RegClass:$rs2, fvti.RegClass:$rs1,
+ (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
+ def : Pat<(fvti.Vector (riscv_vrgather_vx_vl fvti.RegClass:$rs2, GPR:$rs1,
+ fvti.RegClass:$passthru,
+ (fvti.Mask VMV0:$vm),
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVRGATHER_VX_"# fvti.LMul.MX#"_MASK")
+ fvti.RegClass:$passthru, fvti.RegClass:$rs2, GPR:$rs1,
+ (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
+ def : Pat<(fvti.Vector
+ (riscv_vrgather_vx_vl fvti.RegClass:$rs2,
+ uimm5:$imm,
+ fvti.RegClass:$passthru,
+ (fvti.Mask VMV0:$vm),
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVRGATHER_VI_"# fvti.LMul.MX#"_MASK")
+ fvti.RegClass:$passthru, fvti.RegClass:$rs2, uimm5:$imm,
+ (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
+ }
}
let Predicates = [HasStdExtZvfbfwma] in {
@@ -97,3 +572,224 @@ let Predicates = [HasStdExtZvfbfwma] in {
defm : VPatWidenFPMulAccSDNode_VV_VF_RM<"PseudoVFWMACCBF16",
AllWidenableBF16ToFloatVectors>;
}
+
+multiclass VPatConversionVI_VF_BF16<string intrinsic, string instruction> {
+ foreach fvti = AllBF16Vectors in {
+ defvar ivti = GetIntVTypeInfo<fvti>.Vti;
+ let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
+ GetVTypePredicates<ivti>.Predicates) in
+ defm : VPatConversion<intrinsic, instruction, "V",
+ ivti.Vector, fvti.Vector, ivti.Mask, fvti.Log2SEW,
+ fvti.LMul, ivti.RegClass, fvti.RegClass>;
+ }
+}
+
+multiclass VPatConversionWF_VI_BF16<string intrinsic, string instruction,
+ bit isSEWAware = 0> {
+ foreach vtiToWti = AllWidenableIntToBF16Vectors in {
+ defvar vti = vtiToWti.Vti;
+ defvar fwti = vtiToWti.Wti;
+ let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
+ GetVTypePredicates<fwti>.Predicates) in
+ defm : VPatConversion<intrinsic, instruction, "V",
+ fwti.Vector, vti.Vector, fwti.Mask, vti.Log2SEW,
+ vti.LMul, fwti.RegClass, vti.RegClass, isSEWAware>;
+ }
+}
+
+multiclass VPatConversionWF_VF_BF16<string intrinsic, string instruction,
+ bit isSEWAware = 0> {
+ foreach fvtiToFWti = AllWidenableBF16ToFloatVectors in {
+ defvar fvti = fvtiToFWti.Vti;
+ defvar fwti = fvtiToFWti.Wti;
+ let Predicates = !listconcat(GetVTypeMinimalPredicates<fvti>.Predicates,
+ GetVTypeMinimalPredicates<fwti>.Predicates) in
+ defm : VPatConversion<intrinsic, instruction, "V",
+ fwti.Vector, fvti.Vector, fwti.Mask, fvti.Log2SEW,
+ fvti.LMul, fwti.RegClass, fvti.RegClass, isSEWAware>;
+ }
+}
+
+multiclass VPatConversionVI_WF_BF16<string intrinsic, string instruction> {
+ foreach vtiToWti = AllWidenableIntToBF16Vectors in {
+ defvar vti = vtiToWti.Vti;
+ defvar fwti = vtiToWti.Wti;
+ let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
+ GetVTypePredicates<fwti>.Predicates) in
+ defm : VPatConversion<intrinsic, instruction, "W",
+ vti.Vector, fwti.Vector, vti.Mask, vti.Log2SEW,
+ vti.LMul, vti.RegClass, fwti.RegClass>;
+ }
+}
+
+multiclass VPatConversionVI_WF_RM_BF16<string intrinsic, string instruction> {
+ foreach vtiToWti = AllWidenableIntToBF16Vectors in {
+ defvar vti = vtiToWti.Vti;
+ defvar fwti = vtiToWti.Wti;
+ let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
+ GetVTypePredicates<fwti>.Predicates) in
+ defm : VPatConversionRoundingMode<intrinsic, instruction, "W",
+ vti.Vector, fwti.Vector, vti.Mask, vti.Log2SEW,
+ vti.LMul, vti.RegClass, fwti.RegClass>;
+ }
+}
+
+multiclass VPatConversionVF_WF_BF16<string intrinsic, string instruction,
+ bit isSEWAware = 0> {
+ foreach fvtiToFWti = AllWidenableBF16ToFloatVectors in {
+ defvar fvti = fvtiToFWti.Vti;
+ defvar fwti = fvtiToFWti.Wti;
+ let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
+ GetVTypePredicates<fwti>.Predicates) in
+ defm : VPatConversion<intrinsic, instruction, "W",
+ fvti.Vector, fwti.Vector, fvti.Mask, fvti.Log2SEW,
+ fvti.LMul, fvti.RegClass, fwti.RegClass, isSEWAware>;
+ }
+}
+
+let Predicates = [HasStdExtZvfbfa] in {
+defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfadd", "PseudoVFADD_ALT",
+ AllBF16Vectors, isSEWAware = 1>;
+defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfsub", "PseudoVFSUB_ALT",
+ AllBF16Vectors, isSEWAware = 1>;
+defm : VPatBinaryV_VX_RM<"int_riscv_vfrsub", "PseudoVFRSUB_ALT",
+ AllBF16Vectors, isSEWAware = 1>;
+defm : VPatBinaryW_VV_VX_RM<"int_riscv_vfwadd", "PseudoVFWADD_ALT",
+ AllWidenableBF16ToFloatVectors, isSEWAware=1>;
+defm : VPatBinaryW_VV_VX_RM<"int_riscv_vfwsub", "PseudoVFWSUB_ALT",
+ AllWidenableBF16ToFloatVectors, isSEWAware=1>;
+defm : VPatBinaryW_WV_WX_RM<"int_riscv_vfwadd_w", "PseudoVFWADD_ALT",
+ AllWidenableBF16ToFloatVectors, isSEWAware=1>;
+defm : VPatBinaryW_WV_WX_RM<"int_riscv_vfwsub_w", "PseudoVFWSUB_ALT",
+ AllWidenableBF16ToFloatVectors, isSEWAware=1>;
+defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfmul", "PseudoVFMUL_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatBinaryW_VV_VX_RM<"int_riscv_vfwmul", "PseudoVFWMUL_ALT",
+ AllWidenableBF16ToFloatVectors, isSEWAware=1>;
+defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmacc", "PseudoVFMACC_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmacc", "PseudoVFNMACC_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmsac", "PseudoVFMSAC_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmsac", "PseudoVFNMSAC_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmadd", "PseudoVFMADD_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmadd", "PseudoVFNMADD_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmsub", "PseudoVFMSUB_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmsub", "PseudoVFNMSUB_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwmacc", "PseudoVFWMACC_ALT",
+ AllWidenableBF16ToFloatVectors, isSEWAware=1>;
+defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwnmacc", "PseudoVFWNMACC_ALT",
+ AllWidenableBF16ToFloatVectors, isSEWAware=1>;
+defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwmsac", "PseudoVFWMSAC_ALT",
+ AllWidenableBF16ToFloatVectors, isSEWAware=1>;
+defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwnmsac", "PseudoVFWNMSAC_ALT",
+ AllWidenableBF16ToFloatVectors, isSEWAware=1>;
+defm : VPatUnaryV_V<"int_riscv_vfrsqrt7", "PseudoVFRSQRT7_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatUnaryV_V_RM<"int_riscv_vfrec7", "PseudoVFREC7_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatBinaryV_VV_VX<"int_riscv_vfmin", "PseudoVFMIN_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatBinaryV_VV_VX<"int_riscv_vfmax", "PseudoVFMAX_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnj", "PseudoVFSGNJ_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnjn", "PseudoVFSGNJN_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnjx", "PseudoVFSGNJX_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatBinaryM_VV_VX<"int_riscv_vmfeq", "PseudoVMFEQ_ALT", AllBF16Vectors>;
+defm : VPatBinaryM_VV_VX<"int_riscv_vmfle", "PseudoVMFLE_ALT", AllBF16Vectors>;
+defm : VPatBinaryM_VV_VX<"int_riscv_vmflt", "PseudoVMFLT_ALT", AllBF16Vectors>;
+defm : VPatBinaryM_VV_VX<"int_riscv_vmfne", "PseudoVMFNE_ALT", AllBF16Vectors>;
+defm : VPatBinaryM_VX<"int_riscv_vmfgt", "PseudoVMFGT_ALT", AllBF16Vectors>;
+defm : VPatBinaryM_VX<"int_riscv_vmfge", "PseudoVMFGE_ALT", AllBF16Vectors>;
+defm : VPatBinarySwappedM_VV<"int_riscv_vmfgt", "PseudoVMFLT_ALT", AllBF16Vectors>;
+defm : VPatBinarySwappedM_VV<"int_riscv_vmfge", "PseudoVMFLE_ALT", AllBF16Vectors>;
+defm : VPatConversionVI_VF_BF16<"int_riscv_vfclass", "PseudoVFCLASS_ALT">;
+foreach vti = AllBF16Vectors in {
+ let Predicates = GetVTypePredicates<vti>.Predicates in
+ defm : VPatBinaryCarryInTAIL<"int_riscv_vfmerge", "PseudoVFMERGE_ALT",
+ "V"#vti.ScalarSuffix#"M",
+ vti.Vector,
+ vti.Vector, vti.Scalar, vti.Mask,
+ vti.Log2SEW, vti.LMul, vti.RegClass,
+ vti.RegClass, vti.ScalarRegClass>;
+}
+defm : VPatConversionWF_VI_BF16<"int_riscv_vfwcvt_f_xu_v", "PseudoVFWCVT_F_XU_ALT",
+ isSEWAware=1>;
+defm : VPatConversionWF_VI_BF16<"int_riscv_vfwcvt_f_x_v", "PseudoVFWCVT_F_X_ALT",
+ isSEWAware=1>;
+defm : VPatConversionWF_VF_BF16<"int_riscv_vfwcvt_f_f_v", "PseudoVFWCVT_F_F_ALT",
+ isSEWAware=1>;
+defm : VPatConversionVI_WF_RM_BF16<"int_riscv_vfncvt_xu_f_w", "PseudoVFNCVT_XU_F_ALT">;
+defm : VPatConversionVI_WF_RM_BF16<"int_riscv_vfncvt_x_f_w", "PseudoVFNCVT_X_F_ALT">;
+defm : VPatConversionVI_WF_BF16<"int_riscv_vfncvt_rtz_xu_f_w", "PseudoVFNCVT_RTZ_XU_F_ALT">;
+defm : VPatConversionVI_WF_BF16<"int_riscv_vfncvt_rtz_x_f_w", "PseudoVFNCVT_RTZ_X_F_ALT">;
+defm : VPatConversionVF_WF_RM<"int_riscv_vfncvt_f_f_w", "PseudoVFNCVT_F_F_ALT",
+ AllWidenableBF16ToFloatVectors, isSEWAware=1>;
+defm : VPatConversionVF_WF_BF16<"int_riscv_vfncvt_rod_f_f_w", "PseudoVFNCVT_ROD_F_F_ALT",
+ isSEWAware=1>;
+defm : VPatBinaryV_VX<"int_riscv_vfslide1up", "PseudoVFSLIDE1UP_ALT", AllBF16Vectors>;
+defm : VPatBinaryV_VX<"int_riscv_vfslide1down", "PseudoVFSLIDE1DOWN_ALT", AllBF16Vectors>;
+
+foreach fvti = AllBF16Vectors in {
+ defvar ivti = GetIntVTypeInfo<fvti>.Vti;
+ let Predicates = GetVTypePredicates<ivti>.Predicates in {
+ // 13.16. Vector Floating-Point Move Instruction
+ // If we're splatting fpimm0, use vmv.v.x vd, x0.
+ def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
+ fvti.Vector:$passthru, (fvti.Scalar (fpimm0)), VLOpFrag)),
+ (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX)
+ $passthru, 0, GPR:$vl, fvti.Log2SEW, TU_MU)>;
+ def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
+ fvti.Vector:$passthru, (fvti.Scalar (SelectScalarFPAsInt (XLenVT GPR:$imm))), VLOpFrag)),
+ (!cast<Instruction>("PseudoVMV_V_X_"#fvti.LMul.MX)
+ $passthru, GPR:$imm, GPR:$vl, fvti.Log2SEW, TU_MU)>;
+ }
+
+ let Predicates = GetVTypePredicates<fvti>.Predicates in {
+ def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
+ fvti.Vector:$passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)),
+ (!cast<Instruction>("PseudoVFMV_V_ALT_" # fvti.ScalarSuffix # "_" #
+ fvti.LMul.MX)
+ $passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2),
+ GPR:$vl, fvti.Log2SEW, TU_MU)>;
+ }
+}
+
+foreach vti = NoGroupBF16Vectors in {
+ let Predicates = GetVTypePredicates<vti>.Predicates in {
+ def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$passthru),
+ (vti.Scalar (fpimm0)),
+ VLOpFrag)),
+ (PseudoVMV_S_X $passthru, (XLenVT X0), GPR:$vl, vti.Log2SEW)>;
+ def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$passthru),
+ (vti.Scalar (SelectScalarFPAsInt (XLenVT GPR:$imm))),
+ VLOpFrag)),
+ (PseudoVMV_S_X $passthru, GPR:$imm, GPR:$vl, vti.Log2SEW)>;
+ def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$passthru),
+ vti.ScalarRegClass:$rs1,
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVFMV_S_"#vti.ScalarSuffix#"_ALT")
+ vti.RegClass:$passthru,
+ (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>;
+ }
+
+ defvar vfmv_f_s_inst = !cast<Instruction>(!strconcat("PseudoVFMV_",
+ vti.ScalarSuffix,
+ "_S_ALT"));
+ // Only pattern-match extract-element operations where the index is 0. Any
+ // other index will have been custom-lowered to slide the vector correctly
+ // into place.
+ let Predicates = GetVTypePredicates<vti>.Predicates in
+ def : Pat<(vti.Scalar (extractelt (vti.Vector vti.RegClass:$rs2), 0)),
+ (vfmv_f_s_inst vti.RegClass:$rs2, vti.Log2SEW)>;
+}
+} // Predicates = [HasStdExtZvfbfa]
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index 6acf799..334db4b 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -288,9 +288,12 @@ public:
bool hasVInstructionsI64() const { return HasStdExtZve64x; }
bool hasVInstructionsF16Minimal() const { return HasStdExtZvfhmin; }
bool hasVInstructionsF16() const { return HasStdExtZvfh; }
- bool hasVInstructionsBF16Minimal() const { return HasStdExtZvfbfmin; }
+ bool hasVInstructionsBF16Minimal() const {
+ return HasStdExtZvfbfmin || HasStdExtZvfbfa;
+ }
bool hasVInstructionsF32() const { return HasStdExtZve32f; }
bool hasVInstructionsF64() const { return HasStdExtZve64d; }
+ bool hasVInstructionsBF16() const { return HasStdExtZvfbfa; }
// F16 and F64 both require F32.
bool hasVInstructionsAnyF() const { return hasVInstructionsF32(); }
bool hasVInstructionsFullMultiply() const { return HasStdExtV; }
diff --git a/llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp b/llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp
index 56a6168..640b014 100644
--- a/llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp
@@ -78,6 +78,8 @@ public:
void outputExecutionModeFromNumthreadsAttribute(
const MCRegister &Reg, const Attribute &Attr,
SPIRV::ExecutionMode::ExecutionMode EM);
+ void outputExecutionModeFromEnableMaximalReconvergenceAttr(
+ const MCRegister &Reg, const SPIRVSubtarget &ST);
void outputExecutionMode(const Module &M);
void outputAnnotations(const Module &M);
void outputModuleSections();
@@ -495,6 +497,20 @@ void SPIRVAsmPrinter::outputExecutionModeFromNumthreadsAttribute(
outputMCInst(Inst);
}
+void SPIRVAsmPrinter::outputExecutionModeFromEnableMaximalReconvergenceAttr(
+ const MCRegister &Reg, const SPIRVSubtarget &ST) {
+ assert(ST.canUseExtension(SPIRV::Extension::SPV_KHR_maximal_reconvergence) &&
+ "Function called when SPV_KHR_maximal_reconvergence is not enabled.");
+
+ MCInst Inst;
+ Inst.setOpcode(SPIRV::OpExecutionMode);
+ Inst.addOperand(MCOperand::createReg(Reg));
+ unsigned EM =
+ static_cast<unsigned>(SPIRV::ExecutionMode::MaximallyReconvergesKHR);
+ Inst.addOperand(MCOperand::createImm(EM));
+ outputMCInst(Inst);
+}
+
void SPIRVAsmPrinter::outputExecutionMode(const Module &M) {
NamedMDNode *Node = M.getNamedMetadata("spirv.ExecutionMode");
if (Node) {
@@ -551,6 +567,10 @@ void SPIRVAsmPrinter::outputExecutionMode(const Module &M) {
if (Attribute Attr = F.getFnAttribute("hlsl.numthreads"); Attr.isValid())
outputExecutionModeFromNumthreadsAttribute(
FReg, Attr, SPIRV::ExecutionMode::LocalSize);
+ if (Attribute Attr = F.getFnAttribute("enable-maximal-reconvergence");
+ Attr.getValueAsBool()) {
+ outputExecutionModeFromEnableMaximalReconvergenceAttr(FReg, *ST);
+ }
if (MDNode *Node = F.getMetadata("work_group_size_hint"))
outputExecutionModeFromMDNode(FReg, Node,
SPIRV::ExecutionMode::LocalSizeHint, 3, 1);
diff --git a/llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp b/llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp
index 5f3ed86..96f5dee 100644
--- a/llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp
@@ -153,7 +153,9 @@ static const std::map<std::string, SPIRV::Extension::Extension, std::less<>>
SPIRV::Extension::Extension::
SPV_EXT_relaxed_printf_string_address_space},
{"SPV_INTEL_predicated_io",
- SPIRV::Extension::Extension::SPV_INTEL_predicated_io}};
+ SPIRV::Extension::Extension::SPV_INTEL_predicated_io},
+ {"SPV_KHR_maximal_reconvergence",
+ SPIRV::Extension::Extension::SPV_KHR_maximal_reconvergence}};
bool SPIRVExtensionsParser::parse(cl::Option &O, StringRef ArgName,
StringRef ArgValue,
diff --git a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
index c6c6182..a151fd2 100644
--- a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
@@ -1392,19 +1392,19 @@ void SPIRVEmitIntrinsics::preprocessCompositeConstants(IRBuilder<> &B) {
Constant *AggrConst = nullptr;
Type *ResTy = nullptr;
if (auto *COp = dyn_cast<ConstantVector>(Op)) {
- AggrConst = cast<Constant>(COp);
+ AggrConst = COp;
ResTy = COp->getType();
} else if (auto *COp = dyn_cast<ConstantArray>(Op)) {
- AggrConst = cast<Constant>(COp);
+ AggrConst = COp;
ResTy = B.getInt32Ty();
} else if (auto *COp = dyn_cast<ConstantStruct>(Op)) {
- AggrConst = cast<Constant>(COp);
+ AggrConst = COp;
ResTy = B.getInt32Ty();
} else if (auto *COp = dyn_cast<ConstantDataArray>(Op)) {
- AggrConst = cast<Constant>(COp);
+ AggrConst = COp;
ResTy = B.getInt32Ty();
} else if (auto *COp = dyn_cast<ConstantAggregateZero>(Op)) {
- AggrConst = cast<Constant>(COp);
+ AggrConst = COp;
ResTy = Op->getType()->isVectorTy() ? COp->getType() : B.getInt32Ty();
}
if (AggrConst) {
diff --git a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
index 5144fb1..61a0bbe 100644
--- a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
@@ -1200,6 +1200,23 @@ void addOpAccessChainReqs(const MachineInstr &Instr,
return;
}
+ bool IsNonUniform =
+ hasNonUniformDecoration(Instr.getOperand(0).getReg(), MRI);
+
+ auto FirstIndexReg = Instr.getOperand(3).getReg();
+ bool FirstIndexIsConstant =
+ Subtarget.getInstrInfo()->isConstantInstr(*MRI.getVRegDef(FirstIndexReg));
+
+ if (StorageClass == SPIRV::StorageClass::StorageClass::StorageBuffer) {
+ if (IsNonUniform)
+ Handler.addRequirements(
+ SPIRV::Capability::StorageBufferArrayNonUniformIndexingEXT);
+ else if (!FirstIndexIsConstant)
+ Handler.addRequirements(
+ SPIRV::Capability::StorageBufferArrayDynamicIndexing);
+ return;
+ }
+
Register PointeeTypeReg = ResTypeInst->getOperand(2).getReg();
MachineInstr *PointeeType = MRI.getUniqueVRegDef(PointeeTypeReg);
if (PointeeType->getOpcode() != SPIRV::OpTypeImage &&
@@ -1208,27 +1225,25 @@ void addOpAccessChainReqs(const MachineInstr &Instr,
return;
}
- bool IsNonUniform =
- hasNonUniformDecoration(Instr.getOperand(0).getReg(), MRI);
if (isUniformTexelBuffer(PointeeType)) {
if (IsNonUniform)
Handler.addRequirements(
SPIRV::Capability::UniformTexelBufferArrayNonUniformIndexingEXT);
- else
+ else if (!FirstIndexIsConstant)
Handler.addRequirements(
SPIRV::Capability::UniformTexelBufferArrayDynamicIndexingEXT);
} else if (isInputAttachment(PointeeType)) {
if (IsNonUniform)
Handler.addRequirements(
SPIRV::Capability::InputAttachmentArrayNonUniformIndexingEXT);
- else
+ else if (!FirstIndexIsConstant)
Handler.addRequirements(
SPIRV::Capability::InputAttachmentArrayDynamicIndexingEXT);
} else if (isStorageTexelBuffer(PointeeType)) {
if (IsNonUniform)
Handler.addRequirements(
SPIRV::Capability::StorageTexelBufferArrayNonUniformIndexingEXT);
- else
+ else if (!FirstIndexIsConstant)
Handler.addRequirements(
SPIRV::Capability::StorageTexelBufferArrayDynamicIndexingEXT);
} else if (isSampledImage(PointeeType) ||
@@ -1237,14 +1252,14 @@ void addOpAccessChainReqs(const MachineInstr &Instr,
if (IsNonUniform)
Handler.addRequirements(
SPIRV::Capability::SampledImageArrayNonUniformIndexingEXT);
- else
+ else if (!FirstIndexIsConstant)
Handler.addRequirements(
SPIRV::Capability::SampledImageArrayDynamicIndexing);
} else if (isStorageImage(PointeeType)) {
if (IsNonUniform)
Handler.addRequirements(
SPIRV::Capability::StorageImageArrayNonUniformIndexingEXT);
- else
+ else if (!FirstIndexIsConstant)
Handler.addRequirements(
SPIRV::Capability::StorageImageArrayDynamicIndexing);
}
@@ -2155,6 +2170,9 @@ static void collectReqs(const Module &M, SPIRV::ModuleAnalysisInfo &MAI,
SPIRV::OperandCategory::ExecutionModeOperand,
SPIRV::ExecutionMode::LocalSize, ST);
}
+ if (F.getFnAttribute("enable-maximal-reconvergence").getValueAsBool()) {
+ MAI.Reqs.addExtension(SPIRV::Extension::SPV_KHR_maximal_reconvergence);
+ }
if (F.getMetadata("work_group_size_hint"))
MAI.Reqs.getAndAddRequirements(
SPIRV::OperandCategory::ExecutionModeOperand,
diff --git a/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td b/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td
index 2625642..7d08b29 100644
--- a/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td
+++ b/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td
@@ -386,6 +386,7 @@ defm SPV_KHR_float_controls2 : ExtensionOperand<124, [EnvVulkan, EnvOpenCL]>;
defm SPV_INTEL_tensor_float32_conversion : ExtensionOperand<125, [EnvOpenCL]>;
defm SPV_KHR_bfloat16 : ExtensionOperand<126, [EnvVulkan, EnvOpenCL]>;
defm SPV_INTEL_predicated_io : ExtensionOperand<127, [EnvOpenCL]>;
+defm SPV_KHR_maximal_reconvergence : ExtensionOperand<128, [EnvVulkan]>;
//===----------------------------------------------------------------------===//
// Multiclass used to define Capabilities enum values and at the same time
@@ -698,7 +699,7 @@ defm IntersectionNV: ExecutionModelOperand<5314, [RayTracingNV]>;
defm AnyHitNV: ExecutionModelOperand<5315, [RayTracingNV]>;
defm ClosestHitNV: ExecutionModelOperand<5316, [RayTracingNV]>;
defm MissNV: ExecutionModelOperand<5317, [RayTracingNV]>;
-defm CallableNV: ExecutionModelOperand<5318, [RayTracingNV]>;
+defm CallableNV : ExecutionModelOperand<5318, [RayTracingNV]>;
//===----------------------------------------------------------------------===//
// Multiclass used to define MemoryModel enum values and at the same time
@@ -805,6 +806,7 @@ defm RoundingModeRTNINTEL : ExecutionModeOperand<5621, [RoundToInfinityINTEL]>;
defm FloatingPointModeALTINTEL : ExecutionModeOperand<5622, [FloatingPointModeINTEL]>;
defm FloatingPointModeIEEEINTEL : ExecutionModeOperand<5623, [FloatingPointModeINTEL]>;
defm FPFastMathDefault : ExecutionModeOperand<6028, [FloatControls2]>;
+defm MaximallyReconvergesKHR : ExecutionModeOperand<6023, [Shader]>;
//===----------------------------------------------------------------------===//
// Multiclass used to define StorageClass enum values and at the same time
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index a0b64ff..b05d7c7 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -29755,65 +29755,30 @@ static SDValue LowervXi8MulWithUNPCK(SDValue A, SDValue B, const SDLoc &dl,
const X86Subtarget &Subtarget,
SelectionDAG &DAG,
SDValue *Low = nullptr) {
- unsigned NumElts = VT.getVectorNumElements();
-
// For vXi8 we will unpack the low and high half of each 128 bit lane to widen
// to a vXi16 type. Do the multiplies, shift the results and pack the half
// lane results back together.
// We'll take different approaches for signed and unsigned.
- // For unsigned we'll use punpcklbw/punpckhbw to put zero extend the bytes
- // and use pmullw to calculate the full 16-bit product.
+ // For unsigned we'll use punpcklbw/punpckhbw to zero extend the bytes to
+ // words and use pmullw to calculate the full 16-bit product.
// For signed we'll use punpcklbw/punpckbw to extend the bytes to words and
// shift them left into the upper byte of each word. This allows us to use
// pmulhw to calculate the full 16-bit product. This trick means we don't
// need to sign extend the bytes to use pmullw.
-
- MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
+ MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
SDValue Zero = DAG.getConstant(0, dl, VT);
- SDValue ALo, AHi;
+ SDValue ALo, AHi, BLo, BHi;
if (IsSigned) {
ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, Zero, A));
- AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, Zero, A));
- } else {
- ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Zero));
- AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Zero));
- }
-
- SDValue BLo, BHi;
- if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
- // If the RHS is a constant, manually unpackl/unpackh and extend.
- SmallVector<SDValue, 16> LoOps, HiOps;
- for (unsigned i = 0; i != NumElts; i += 16) {
- for (unsigned j = 0; j != 8; ++j) {
- SDValue LoOp = B.getOperand(i + j);
- SDValue HiOp = B.getOperand(i + j + 8);
-
- if (IsSigned) {
- LoOp = DAG.getAnyExtOrTrunc(LoOp, dl, MVT::i16);
- HiOp = DAG.getAnyExtOrTrunc(HiOp, dl, MVT::i16);
- LoOp = DAG.getNode(ISD::SHL, dl, MVT::i16, LoOp,
- DAG.getConstant(8, dl, MVT::i16));
- HiOp = DAG.getNode(ISD::SHL, dl, MVT::i16, HiOp,
- DAG.getConstant(8, dl, MVT::i16));
- } else {
- LoOp = DAG.getZExtOrTrunc(LoOp, dl, MVT::i16);
- HiOp = DAG.getZExtOrTrunc(HiOp, dl, MVT::i16);
- }
-
- LoOps.push_back(LoOp);
- HiOps.push_back(HiOp);
- }
- }
-
- BLo = DAG.getBuildVector(ExVT, dl, LoOps);
- BHi = DAG.getBuildVector(ExVT, dl, HiOps);
- } else if (IsSigned) {
BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, Zero, B));
+ AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, Zero, A));
BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, Zero, B));
} else {
+ ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Zero));
BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Zero));
+ AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Zero));
BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Zero));
}
@@ -29826,7 +29791,7 @@ static SDValue LowervXi8MulWithUNPCK(SDValue A, SDValue B, const SDLoc &dl,
if (Low)
*Low = getPack(DAG, Subtarget, dl, VT, RLo, RHi);
- return getPack(DAG, Subtarget, dl, VT, RLo, RHi, /*PackHiHalf*/ true);
+ return getPack(DAG, Subtarget, dl, VT, RLo, RHi, /*PackHiHalf=*/true);
}
static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
@@ -44848,10 +44813,16 @@ bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
}
case X86ISD::PCMPGT:
// icmp sgt(0, R) == ashr(R, BitWidth-1).
- // iff we only need the sign bit then we can use R directly.
- if (OriginalDemandedBits.isSignMask() &&
- ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
- return TLO.CombineTo(Op, Op.getOperand(1));
+ if (ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode())) {
+ // iff we only need the sign bit then we can use R directly.
+ if (OriginalDemandedBits.isSignMask())
+ return TLO.CombineTo(Op, Op.getOperand(1));
+ // Otherwise we just need R's sign bit for the comparison.
+ APInt SignMask = APInt::getSignMask(BitWidth);
+ if (SimplifyDemandedBits(Op.getOperand(1), SignMask, OriginalDemandedElts,
+ Known, TLO, Depth + 1))
+ return true;
+ }
break;
case X86ISD::MOVMSK: {
SDValue Src = Op.getOperand(0);
@@ -47761,6 +47732,15 @@ static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
DL, DAG, Subtarget))
return V;
+ // If the sign bit is known then BLENDV can be folded away.
+ if (N->getOpcode() == X86ISD::BLENDV) {
+ KnownBits KnownCond = DAG.computeKnownBits(Cond);
+ if (KnownCond.isNegative())
+ return LHS;
+ if (KnownCond.isNonNegative())
+ return RHS;
+ }
+
if (N->getOpcode() == ISD::VSELECT || N->getOpcode() == X86ISD::BLENDV) {
SmallVector<int, 64> CondMask;
if (createShuffleMaskFromVSELECT(CondMask, Cond,
diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
index 6dd43b2..37d7772 100644
--- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
+++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
@@ -606,16 +606,24 @@ Value *X86TargetLowering::getIRStackGuard(IRBuilderBase &IRB) const {
void X86TargetLowering::insertSSPDeclarations(Module &M) const {
// MSVC CRT provides functionalities for stack protection.
- if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
- Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
+ RTLIB::LibcallImpl SecurityCheckCookieLibcall =
+ getLibcallImpl(RTLIB::SECURITY_CHECK_COOKIE);
+
+ RTLIB::LibcallImpl SecurityCookieVar =
+ getLibcallImpl(RTLIB::STACK_CHECK_GUARD);
+ if (SecurityCheckCookieLibcall != RTLIB::Unsupported &&
+ SecurityCookieVar != RTLIB::Unsupported) {
+ // MSVC CRT provides functionalities for stack protection.
// MSVC CRT has a global variable holding security cookie.
- M.getOrInsertGlobal("__security_cookie",
+ M.getOrInsertGlobal(getLibcallImplName(SecurityCookieVar),
PointerType::getUnqual(M.getContext()));
// MSVC CRT has a function to validate security cookie.
- FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
- "__security_check_cookie", Type::getVoidTy(M.getContext()),
- PointerType::getUnqual(M.getContext()));
+ FunctionCallee SecurityCheckCookie =
+ M.getOrInsertFunction(getLibcallImplName(SecurityCheckCookieLibcall),
+ Type::getVoidTy(M.getContext()),
+ PointerType::getUnqual(M.getContext()));
+
if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee())) {
F->setCallingConv(CallingConv::X86_FastCall);
F->addParamAttr(0, Attribute::AttrKind::InReg);
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 1d2cd39..5c23f91 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -10809,39 +10809,27 @@ void X86InstrInfo::buildClearRegister(Register Reg, MachineBasicBlock &MBB,
if (!ST.hasSSE1())
return;
- // PXOR is safe to use because it doesn't affect flags.
- BuildMI(MBB, Iter, DL, get(X86::PXORrr), Reg)
- .addReg(Reg, RegState::Undef)
- .addReg(Reg, RegState::Undef);
+ BuildMI(MBB, Iter, DL, get(X86::V_SET0), Reg);
} else if (X86::VR256RegClass.contains(Reg)) {
// YMM#
if (!ST.hasAVX())
return;
- // VPXOR is safe to use because it doesn't affect flags.
- BuildMI(MBB, Iter, DL, get(X86::VPXORrr), Reg)
- .addReg(Reg, RegState::Undef)
- .addReg(Reg, RegState::Undef);
+ BuildMI(MBB, Iter, DL, get(X86::AVX_SET0), Reg);
} else if (X86::VR512RegClass.contains(Reg)) {
// ZMM#
if (!ST.hasAVX512())
return;
- // VPXORY is safe to use because it doesn't affect flags.
- BuildMI(MBB, Iter, DL, get(X86::VPXORYrr), Reg)
- .addReg(Reg, RegState::Undef)
- .addReg(Reg, RegState::Undef);
+ BuildMI(MBB, Iter, DL, get(X86::AVX512_512_SET0), Reg);
} else if (X86::VK1RegClass.contains(Reg) || X86::VK2RegClass.contains(Reg) ||
X86::VK4RegClass.contains(Reg) || X86::VK8RegClass.contains(Reg) ||
X86::VK16RegClass.contains(Reg)) {
if (!ST.hasVLX())
return;
- // KXOR is safe to use because it doesn't affect flags.
- unsigned Op = ST.hasBWI() ? X86::KXORQkk : X86::KXORWkk;
- BuildMI(MBB, Iter, DL, get(Op), Reg)
- .addReg(Reg, RegState::Undef)
- .addReg(Reg, RegState::Undef);
+ unsigned Op = ST.hasBWI() ? X86::KSET0Q : X86::KSET0W;
+ BuildMI(MBB, Iter, DL, get(Op), Reg);
}
}
diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
index 1fca466f..713d504 100644
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -1928,6 +1928,17 @@ static void addConstantComments(const MachineInstr *MI,
#define INSTR_CASE(Prefix, Instr, Suffix, Postfix) \
case X86::Prefix##Instr##Suffix##rm##Postfix:
+#define CASE_AVX512_ARITH_RM(Instr) \
+ INSTR_CASE(V, Instr, Z128, ) \
+ INSTR_CASE(V, Instr, Z128, k) \
+ INSTR_CASE(V, Instr, Z128, kz) \
+ INSTR_CASE(V, Instr, Z256, ) \
+ INSTR_CASE(V, Instr, Z256, k) \
+ INSTR_CASE(V, Instr, Z256, kz) \
+ INSTR_CASE(V, Instr, Z, ) \
+ INSTR_CASE(V, Instr, Z, k) \
+ INSTR_CASE(V, Instr, Z, kz)
+
#define CASE_ARITH_RM(Instr) \
INSTR_CASE(, Instr, , ) /* SSE */ \
INSTR_CASE(V, Instr, , ) /* AVX-128 */ \
@@ -1943,22 +1954,12 @@ static void addConstantComments(const MachineInstr *MI,
INSTR_CASE(V, Instr, Z, kz)
// TODO: Add additional instructions when useful.
- CASE_ARITH_RM(PMADDUBSW) {
- unsigned SrcIdx = getSrcIdx(MI, 1);
- if (auto *C = X86::getConstantFromPool(*MI, SrcIdx + 1)) {
- std::string Comment;
- raw_string_ostream CS(Comment);
- unsigned VectorWidth =
- X86::getVectorRegisterWidth(MI->getDesc().operands()[0]);
- CS << "[";
- printConstant(C, VectorWidth, CS);
- CS << "]";
- OutStreamer.AddComment(CS.str());
- }
- break;
- }
-
+ CASE_ARITH_RM(PMADDUBSW)
CASE_ARITH_RM(PMADDWD)
+ CASE_ARITH_RM(PMULDQ)
+ CASE_ARITH_RM(PMULUDQ)
+ CASE_ARITH_RM(PMULLD)
+ CASE_AVX512_ARITH_RM(PMULLQ)
CASE_ARITH_RM(PMULLW)
CASE_ARITH_RM(PMULHW)
CASE_ARITH_RM(PMULHUW)
diff --git a/llvm/lib/Transforms/CFGuard/CFGuard.cpp b/llvm/lib/Transforms/CFGuard/CFGuard.cpp
index b73a0ce..4645670 100644
--- a/llvm/lib/Transforms/CFGuard/CFGuard.cpp
+++ b/llvm/lib/Transforms/CFGuard/CFGuard.cpp
@@ -147,7 +147,7 @@ public:
private:
// Only add checks if the module has the cfguard=2 flag.
- int cfguard_module_flag = 0;
+ int CFGuardModuleFlag = 0;
StringRef GuardFnName;
Mechanism GuardMechanism = Mechanism::Check;
FunctionType *GuardFnType = nullptr;
@@ -162,9 +162,7 @@ public:
static char ID;
// Default constructor required for the INITIALIZE_PASS macro.
- CFGuard(CFGuardImpl::Mechanism M) : FunctionPass(ID), Impl(M) {
- initializeCFGuardPass(*PassRegistry::getPassRegistry());
- }
+ CFGuard(CFGuardImpl::Mechanism M) : FunctionPass(ID), Impl(M) {}
bool doInitialization(Module &M) override { return Impl.doInitialization(M); }
bool runOnFunction(Function &F) override { return Impl.runOnFunction(F); }
@@ -173,7 +171,6 @@ public:
} // end anonymous namespace
void CFGuardImpl::insertCFGuardCheck(CallBase *CB) {
-
assert(CB->getModule()->getTargetTriple().isOSWindows() &&
"Only applicable for Windows targets");
assert(CB->isIndirectCall() &&
@@ -202,7 +199,6 @@ void CFGuardImpl::insertCFGuardCheck(CallBase *CB) {
}
void CFGuardImpl::insertCFGuardDispatch(CallBase *CB) {
-
assert(CB->getModule()->getTargetTriple().isOSWindows() &&
"Only applicable for Windows targets");
assert(CB->isIndirectCall() &&
@@ -236,14 +232,13 @@ void CFGuardImpl::insertCFGuardDispatch(CallBase *CB) {
}
bool CFGuardImpl::doInitialization(Module &M) {
-
// Check if this module has the cfguard flag and read its value.
if (auto *MD =
mdconst::extract_or_null<ConstantInt>(M.getModuleFlag("cfguard")))
- cfguard_module_flag = MD->getZExtValue();
+ CFGuardModuleFlag = MD->getZExtValue();
// Skip modules for which CFGuard checks have been disabled.
- if (cfguard_module_flag != 2)
+ if (CFGuardModuleFlag != 2)
return false;
// Set up prototypes for the guard check and dispatch functions.
@@ -264,9 +259,8 @@ bool CFGuardImpl::doInitialization(Module &M) {
}
bool CFGuardImpl::runOnFunction(Function &F) {
-
// Skip modules for which CFGuard checks have been disabled.
- if (cfguard_module_flag != 2)
+ if (CFGuardModuleFlag != 2)
return false;
SmallVector<CallBase *, 8> IndirectCalls;
@@ -286,19 +280,16 @@ bool CFGuardImpl::runOnFunction(Function &F) {
}
// If no checks are needed, return early.
- if (IndirectCalls.empty()) {
+ if (IndirectCalls.empty())
return false;
- }
// For each indirect call/invoke, add the appropriate dispatch or check.
if (GuardMechanism == Mechanism::Dispatch) {
- for (CallBase *CB : IndirectCalls) {
+ for (CallBase *CB : IndirectCalls)
insertCFGuardDispatch(CB);
- }
} else {
- for (CallBase *CB : IndirectCalls) {
+ for (CallBase *CB : IndirectCalls)
insertCFGuardCheck(CB);
- }
}
return true;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 07ad65c..fba1ccf 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -1481,13 +1481,13 @@ Instruction *InstCombinerImpl::foldICmpTruncConstant(ICmpInst &Cmp,
return new ICmpInst(Pred, Y, ConstantInt::get(SrcTy, C.logBase2()));
}
- if (Cmp.isEquality() && Trunc->hasOneUse()) {
+ if (Cmp.isEquality() && (Trunc->hasOneUse() || Trunc->hasNoUnsignedWrap())) {
// Canonicalize to a mask and wider compare if the wide type is suitable:
// (trunc X to i8) == C --> (X & 0xff) == (zext C)
if (!SrcTy->isVectorTy() && shouldChangeType(DstBits, SrcBits)) {
Constant *Mask =
ConstantInt::get(SrcTy, APInt::getLowBitsSet(SrcBits, DstBits));
- Value *And = Builder.CreateAnd(X, Mask);
+ Value *And = Trunc->hasNoUnsignedWrap() ? X : Builder.CreateAnd(X, Mask);
Constant *WideC = ConstantInt::get(SrcTy, C.zext(SrcBits));
return new ICmpInst(Pred, And, WideC);
}
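
Why the new nuw path can drop the mask: `trunc nuw` guarantees that the bits truncated away from X are zero, so `(trunc nuw X) == C` can be rewritten as a wide compare of X against `zext(C)` directly, without the `X & mask` step. A minimal standalone check of that equivalence for i16 -> i8 (a sketch for illustration, not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  // 'trunc nuw i16 %X to i8' is only valid when X already fits in 8 bits.
  for (uint32_t X = 0; X < 256; ++X) {
    for (uint32_t C = 0; C < 256; ++C) {
      bool Narrow = (uint8_t)X == (uint8_t)C; // (trunc nuw X) == C
      bool Masked = (X & 0xff) == C;          // (X & 0xff) == zext(C)
      bool Direct = X == C;                   // X == zext(C), mask elided
      assert(Narrow == Masked && Masked == Direct);
    }
  }
  return 0;
}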
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 09cb225..a8eb9b9 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -3757,6 +3757,10 @@ static Instruction *foldBitCeil(SelectInst &SI, IRBuilderBase &Builder,
// (x < y) ? -1 : zext(x > y)
// (x > y) ? 1 : sext(x != y)
// (x > y) ? 1 : sext(x < y)
+// (x == y) ? 0 : (x > y ? 1 : -1)
+// (x == y) ? 0 : (x < y ? -1 : 1)
+// Special case: x == C ? 0 : (x > C - 1 ? 1 : -1)
+// Special case: x == C ? 0 : (x < C + 1 ? -1 : 1)
// Into ucmp/scmp(x, y), where signedness is determined by the signedness
// of the comparison in the original sequence.
Instruction *InstCombinerImpl::foldSelectToCmp(SelectInst &SI) {
@@ -3849,6 +3853,44 @@ Instruction *InstCombinerImpl::foldSelectToCmp(SelectInst &SI) {
}
}
+ // Special cases with constants: x == C ? 0 : (x > C-1 ? 1 : -1)
+ if (Pred == ICmpInst::ICMP_EQ && match(TV, m_Zero())) {
+ const APInt *C;
+ if (match(RHS, m_APInt(C))) {
+ CmpPredicate InnerPred;
+ Value *InnerRHS;
+ const APInt *InnerTV, *InnerFV;
+ if (match(FV,
+ m_Select(m_ICmp(InnerPred, m_Specific(LHS), m_Value(InnerRHS)),
+ m_APInt(InnerTV), m_APInt(InnerFV)))) {
+
+ // x == C ? 0 : (x > C-1 ? 1 : -1)
+ if (ICmpInst::isGT(InnerPred) && InnerTV->isOne() &&
+ InnerFV->isAllOnes()) {
+ IsSigned = ICmpInst::isSigned(InnerPred);
+ bool CanSubOne = IsSigned ? !C->isMinSignedValue() : !C->isMinValue();
+ if (CanSubOne) {
+ APInt Cminus1 = *C - 1;
+ if (match(InnerRHS, m_SpecificInt(Cminus1)))
+ Replace = true;
+ }
+ }
+
+ // x == C ? 0 : (x < C+1 ? -1 : 1)
+ if (ICmpInst::isLT(InnerPred) && InnerTV->isAllOnes() &&
+ InnerFV->isOne()) {
+ IsSigned = ICmpInst::isSigned(InnerPred);
+ bool CanAddOne = IsSigned ? !C->isMaxSignedValue() : !C->isMaxValue();
+ if (CanAddOne) {
+ APInt Cplus1 = *C + 1;
+ if (match(InnerRHS, m_SpecificInt(Cplus1)))
+ Replace = true;
+ }
+ }
+ }
+ }
+ }
+
Intrinsic::ID IID = IsSigned ? Intrinsic::scmp : Intrinsic::ucmp;
if (Replace)
return replaceInstUsesWith(
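
Why the constant special case is sound: when x != C, the inner compare x > C-1 is equivalent to x >= C, and with equality already excluded that means x > C, so the nested select yields exactly the three-way result of scmp/ucmp(x, C); the CanSubOne/CanAddOne guards merely keep C-1 and C+1 from wrapping. A brute-force check of the signed variant over i8 (a standalone sketch, not part of the patch):

#include <cassert>

// Reference three-way signed compare: returns -1, 0, or 1.
static int scmp(int x, int y) { return x < y ? -1 : (x > y ? 1 : 0); }

int main() {
  for (int x = -128; x <= 127; ++x) {
    for (int C = -127; C <= 127; ++C) { // C != -128, so C - 1 cannot wrap
      int Pattern = (x == C) ? 0 : (x > C - 1 ? 1 : -1);
      assert(Pattern == scmp(x, C));
    }
  }
  return 0;
}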
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 6e17801..2646334 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -844,6 +844,7 @@ struct AddressSanitizer {
bool maybeInsertAsanInitAtFunctionEntry(Function &F);
bool maybeInsertDynamicShadowAtFunctionEntry(Function &F);
void markEscapedLocalAllocas(Function &F);
+ void markCatchParametersAsUninteresting(Function &F);
private:
friend struct FunctionStackPoisoner;
@@ -2997,6 +2998,22 @@ void AddressSanitizer::markEscapedLocalAllocas(Function &F) {
}
}
}
+// Mitigation for https://github.com/google/sanitizers/issues/749
+// We don't instrument Windows catch-block parameters to avoid
+// interfering with exception handling assumptions.
+void AddressSanitizer::markCatchParametersAsUninteresting(Function &F) {
+ for (BasicBlock &BB : F) {
+ for (Instruction &I : BB) {
+ if (auto *CatchPad = dyn_cast<CatchPadInst>(&I)) {
+ // Mark the parameters to a catch-block as uninteresting to avoid
+ // instrumenting them.
+ for (Value *Operand : CatchPad->arg_operands())
+ if (auto *AI = dyn_cast<AllocaInst>(Operand))
+ ProcessedAllocas[AI] = false;
+ }
+ }
+ }
+}
bool AddressSanitizer::suppressInstrumentationSiteForDebug(int &Instrumented) {
bool ShouldInstrument =
@@ -3041,6 +3058,9 @@ bool AddressSanitizer::instrumentFunction(Function &F,
// can be passed to that intrinsic.
markEscapedLocalAllocas(F);
+ if (TargetTriple.isOSWindows())
+ markCatchParametersAsUninteresting(F);
+
// We want to instrument every address only once per basic block (unless there
// are calls between uses).
SmallPtrSet<Value *, 16> TempsToInstrument;
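
For context on the allocas that markCatchParametersAsUninteresting skips: on Windows EH, the catch object is an alloca that appears as a catchpad operand and is written by the EH runtime outside any instrumented code. A hypothetical C++ shape that produces such an alloca (illustrative only; the helper name is made up):

#include <cstdio>
#include <stdexcept>

static void may_throw() { throw std::runtime_error("boom"); }

int main() {
  try {
    may_throw();
  } catch (const std::runtime_error &e) {
    // When targeting Windows, 'e' is backed by an alloca listed on the
    // catchpad; ASan now leaves that alloca uninstrumented.
    std::printf("%s\n", e.what());
  }
  return 0;
}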
diff --git a/llvm/lib/Transforms/Scalar/LoopPassManager.cpp b/llvm/lib/Transforms/Scalar/LoopPassManager.cpp
index 7da8586..d827e64 100644
--- a/llvm/lib/Transforms/Scalar/LoopPassManager.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopPassManager.cpp
@@ -8,7 +8,6 @@
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include "llvm/Analysis/AssumptionCache.h"
-#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
@@ -217,9 +216,6 @@ PreservedAnalyses FunctionToLoopPassAdaptor::run(Function &F,
// Get the analysis results needed by loop passes.
MemorySSA *MSSA =
UseMemorySSA ? (&AM.getResult<MemorySSAAnalysis>(F).getMSSA()) : nullptr;
- BlockFrequencyInfo *BFI = UseBlockFrequencyInfo && F.hasProfileData()
- ? (&AM.getResult<BlockFrequencyAnalysis>(F))
- : nullptr;
LoopStandardAnalysisResults LAR = {AM.getResult<AAManager>(F),
AM.getResult<AssumptionAnalysis>(F),
AM.getResult<DominatorTreeAnalysis>(F),
@@ -227,7 +223,6 @@ PreservedAnalyses FunctionToLoopPassAdaptor::run(Function &F,
AM.getResult<ScalarEvolutionAnalysis>(F),
AM.getResult<TargetLibraryAnalysis>(F),
AM.getResult<TargetIRAnalysis>(F),
- BFI,
MSSA};
// Setup the loop analysis manager from its proxy. It is important that
diff --git a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
index 7cae94eb..3487e81 100644
--- a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
@@ -97,6 +97,12 @@ static cl::opt<MatrixLayoutTy> MatrixLayout(
static cl::opt<bool> PrintAfterTransposeOpt("matrix-print-after-transpose-opt",
cl::init(false));
+static cl::opt<unsigned> SplitMatmulRemainderOverThreshold(
+ "matrix-split-matmul-remainder-over-threshold", cl::Hidden,
+ cl::desc("Illegal remainder vectors over this size in bits should be split "
+ "in the inner loop of matmul"),
+ cl::init(0));
+
/// Helper function to either return Scope, if it is a subprogram or the
/// attached subprogram for a local scope.
static DISubprogram *getSubprogram(DIScope *Scope) {
@@ -115,18 +121,16 @@ static bool isSplat(Value *V) {
/// Match any mul operation (fp or integer).
template <typename LTy, typename RTy>
-auto m_AnyMul(const LTy &L, const RTy &R) {
+static auto m_AnyMul(const LTy &L, const RTy &R) {
return m_CombineOr(m_Mul(L, R), m_FMul(L, R));
}
/// Match any add operation (fp or integer).
template <typename LTy, typename RTy>
-auto m_AnyAdd(const LTy &L, const RTy &R) {
+static auto m_AnyAdd(const LTy &L, const RTy &R) {
return m_CombineOr(m_Add(L, R), m_FAdd(L, R));
}
-namespace {
-
// Given an element pointer \p BasePtr to the start of a (sub) matrix, compute
// the start address of vector \p VecIdx with type (\p EltType x \p NumElements)
// assuming \p Stride elements between the starts of two consecutive vectors.
@@ -167,9 +171,9 @@ namespace {
// v_2_0 |v_2_1 |v_2_2 |v_2_3
// v_3_0 {v_3_1 {v_3_2 v_3_3
//
-Value *computeVectorAddr(Value *BasePtr, Value *VecIdx, Value *Stride,
- unsigned NumElements, Type *EltType,
- IRBuilder<> &Builder) {
+static Value *computeVectorAddr(Value *BasePtr, Value *VecIdx, Value *Stride,
+ unsigned NumElements, Type *EltType,
+ IRBuilder<> &Builder) {
assert((!isa<ConstantInt>(Stride) ||
cast<ConstantInt>(Stride)->getZExtValue() >= NumElements) &&
@@ -338,6 +342,8 @@ computeShapeInfoForInst(Instruction *I,
return std::nullopt;
}
+namespace {
+
/// LowerMatrixIntrinsics contains the methods used to lower matrix intrinsics.
///
/// Currently, the lowering for each matrix intrinsic is done as follows:
@@ -371,7 +377,8 @@ class LowerMatrixIntrinsics {
LoopInfo *LI = nullptr;
OptimizationRemarkEmitter *ORE = nullptr;
- /// Contains estimates of the number of operations (loads, stores, compute) required to lower a matrix operation.
+ /// Contains estimates of the number of operations (loads, stores, compute)
+ /// required to lower a matrix operation.
struct OpInfoTy {
/// Number of stores emitted to generate this matrix.
unsigned NumStores = 0;
@@ -1719,6 +1726,31 @@ public:
ToRemove.push_back(MatMul);
}
+  /// Given \p Remainder iterations of the matmul inner loop, potentially
+  /// lower the \p BlockSize used for the underlying vector.
+ unsigned capBlockSize(unsigned BlockSize, unsigned Remainder, Type *EltType) {
+ if (BlockSize <= Remainder)
+ return BlockSize;
+
+    // If the remainder is also a legal type, just use it.
+ auto *VecTy = FixedVectorType::get(EltType, Remainder);
+ if (TTI.isTypeLegal(VecTy))
+ return Remainder;
+
+    // Similarly, use the remainder if the vector is small enough that we
+    // don't want to split it further.
+ if (VecTy->getPrimitiveSizeInBits() <= SplitMatmulRemainderOverThreshold)
+ return Remainder;
+
+ // Gradually lower the vectorization factor to cover the
+ // remainder.
+ do {
+ BlockSize /= 2;
+ } while (BlockSize > Remainder);
+ return BlockSize;
+ }
+
/// Compute \p Result += \p A * \p B for input matrices with left-associating
/// addition.
///
@@ -1756,10 +1788,8 @@ public:
bool isSumZero = isa<ConstantAggregateZero>(Result.getColumn(J));
for (unsigned I = 0; I < R; I += BlockSize) {
- // Gradually lower the vectorization factor to cover the remainder.
- while (I + BlockSize > R)
- BlockSize /= 2;
-
+ // Lower block size to make sure we stay within bounds.
+ BlockSize = capBlockSize(BlockSize, R - I, Result.getElementType());
Value *Sum = IsTiled ? Result.extractVector(I, J, BlockSize, Builder)
: nullptr;
for (unsigned K = 0; K < M; ++K) {
@@ -1784,9 +1814,8 @@ public:
unsigned BlockSize = VF;
bool isSumZero = isa<ConstantAggregateZero>(Result.getRow(I));
for (unsigned J = 0; J < C; J += BlockSize) {
- // Gradually lower the vectorization factor to cover the remainder.
- while (J + BlockSize > C)
- BlockSize /= 2;
+ // Lower the vectorization factor to cover the remainder.
+ BlockSize = capBlockSize(BlockSize, C - J, Result.getElementType());
Value *Sum = nullptr;
for (unsigned K = 0; K < M; ++K) {
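
The effect of capBlockSize on the remainder loops above can be seen in isolation. Below is a standalone sketch of the same capping logic with the TTI legality query and the new cl::opt threshold replaced by plain boolean parameters, so it is illustrative rather than the pass's exact code:

#include <cassert>

// Keep the full block if it fits, keep the raw remainder if it is legal or
// small enough, otherwise halve the block size until it fits.
static unsigned capBlockSize(unsigned BlockSize, unsigned Remainder,
                             bool RemainderIsLegal, bool RemainderIsSmall) {
  if (BlockSize <= Remainder)
    return BlockSize;
  if (RemainderIsLegal || RemainderIsSmall)
    return Remainder;
  do {
    BlockSize /= 2;
  } while (BlockSize > Remainder);
  return BlockSize;
}

int main() {
  // VF = 8 with 3 leftover lanes: the old behavior halves down to 2.
  assert(capBlockSize(8, 3, /*Legal=*/false, /*Small=*/false) == 2);
  // If a <3 x elt> vector is legal (or under the split threshold), use it.
  assert(capBlockSize(8, 3, /*Legal=*/true, /*Small=*/false) == 3);
  // Remainder already covers the block: nothing to cap.
  assert(capBlockSize(4, 8, /*Legal=*/false, /*Small=*/false) == 4);
  return 0;
}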
diff --git a/llvm/lib/Transforms/Scalar/Reg2Mem.cpp b/llvm/lib/Transforms/Scalar/Reg2Mem.cpp
index 30b27cb..7646624 100644
--- a/llvm/lib/Transforms/Scalar/Reg2Mem.cpp
+++ b/llvm/lib/Transforms/Scalar/Reg2Mem.cpp
@@ -107,9 +107,7 @@ PreservedAnalyses RegToMemPass::run(Function &F, FunctionAnalysisManager &AM) {
return PA;
}
-namespace llvm {
-
-void initializeRegToMemWrapperPassPass(PassRegistry &);
+namespace {
class RegToMemWrapperPass : public FunctionPass {
public:
@@ -136,7 +134,7 @@ public:
return N != 0 || Changed;
}
};
-} // namespace llvm
+} // namespace
INITIALIZE_PASS_BEGIN(RegToMemWrapperPass, "reg2mem", "", true, true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index a692009..5c60fad 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -344,6 +344,12 @@ static void migrateDebugInfo(AllocaInst *OldAlloca, bool IsSplit,
uint64_t SliceSizeInBits, Instruction *OldInst,
Instruction *Inst, Value *Dest, Value *Value,
const DataLayout &DL) {
+  // If we want allocas to be migrated using this helper, then we need to
+  // ensure that the BaseFragments map code still works. A simple solution
+  // would be to always clone alloca dbg_assigns (rather than sometimes
+  // "stealing" them).
+ assert(!isa<AllocaInst>(Inst) && "Unexpected alloca");
+
auto DVRAssignMarkerRange = at::getDVRAssignmentMarkers(OldInst);
// Nothing to do if OldInst has no linked dbg.assign intrinsics.
if (DVRAssignMarkerRange.empty())
@@ -429,11 +435,22 @@ static void migrateDebugInfo(AllocaInst *OldAlloca, bool IsSplit,
Inst->setMetadata(LLVMContext::MD_DIAssignID, NewID);
}
- ::Value *NewValue = Value ? Value : DbgAssign->getValue();
- DbgVariableRecord *NewAssign = cast<DbgVariableRecord>(cast<DbgRecord *>(
- DIB.insertDbgAssign(Inst, NewValue, DbgAssign->getVariable(), Expr,
- Dest, DIExpression::get(Expr->getContext(), {}),
- DbgAssign->getDebugLoc())));
+ DbgVariableRecord *NewAssign;
+ if (IsSplit) {
+ ::Value *NewValue = Value ? Value : DbgAssign->getValue();
+ NewAssign = cast<DbgVariableRecord>(cast<DbgRecord *>(
+ DIB.insertDbgAssign(Inst, NewValue, DbgAssign->getVariable(), Expr,
+ Dest, DIExpression::get(Expr->getContext(), {}),
+ DbgAssign->getDebugLoc())));
+ } else {
+      // The store is not split; simply steal the existing dbg_assign.
+ NewAssign = DbgAssign;
+ NewAssign->setAssignId(NewID); // FIXME: Can we avoid generating new IDs?
+ NewAssign->setAddress(Dest);
+ if (Value)
+ NewAssign->replaceVariableLocationOp(0u, Value);
+ assert(Expr == NewAssign->getExpression());
+ }
// If we've updated the value but the original dbg.assign has an arglist
// then kill it now - we can't use the requested new value.
@@ -464,9 +481,10 @@ static void migrateDebugInfo(AllocaInst *OldAlloca, bool IsSplit,
// noted as slightly offset (in code) from the store. In practice this
// should have little effect on the debugging experience due to the fact
// that all the split stores should get the same line number.
- NewAssign->moveBefore(DbgAssign->getIterator());
-
- NewAssign->setDebugLoc(DbgAssign->getDebugLoc());
+ if (NewAssign != DbgAssign) {
+ NewAssign->moveBefore(DbgAssign->getIterator());
+ NewAssign->setDebugLoc(DbgAssign->getDebugLoc());
+ }
LLVM_DEBUG(dbgs() << "Created new assign: " << *NewAssign << "\n");
};
diff --git a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
index e4ba70d..5af6c96 100644
--- a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
+++ b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
@@ -27,7 +27,6 @@
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/MustExecute.h"
-#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
@@ -3611,8 +3610,7 @@ static bool unswitchLoop(Loop &L, DominatorTree &DT, LoopInfo &LI,
AssumptionCache &AC, AAResults &AA,
TargetTransformInfo &TTI, bool Trivial,
bool NonTrivial, ScalarEvolution *SE,
- MemorySSAUpdater *MSSAU, ProfileSummaryInfo *PSI,
- BlockFrequencyInfo *BFI, LPMUpdater &LoopUpdater) {
+ MemorySSAUpdater *MSSAU, LPMUpdater &LoopUpdater) {
assert(L.isRecursivelyLCSSAForm(DT, LI) &&
"Loops must be in LCSSA form before unswitching.");
@@ -3652,35 +3650,6 @@ static bool unswitchLoop(Loop &L, DominatorTree &DT, LoopInfo &LI,
if (F->hasOptSize())
return false;
- // Returns true if Loop L's loop nest is cold, i.e. if the headers of L,
- // of the loops L is nested in, and of the loops nested in L are all cold.
- auto IsLoopNestCold = [&](const Loop *L) {
- // Check L and all of its parent loops.
- auto *Parent = L;
- while (Parent) {
- if (!PSI->isColdBlock(Parent->getHeader(), BFI))
- return false;
- Parent = Parent->getParentLoop();
- }
- // Next check all loops nested within L.
- SmallVector<const Loop *, 4> Worklist;
- llvm::append_range(Worklist, L->getSubLoops());
- while (!Worklist.empty()) {
- auto *CurLoop = Worklist.pop_back_val();
- if (!PSI->isColdBlock(CurLoop->getHeader(), BFI))
- return false;
- llvm::append_range(Worklist, CurLoop->getSubLoops());
- }
- return true;
- };
-
- // Skip cold loops in cold loop nests, as unswitching them brings little
- // benefit but increases the code size
- if (PSI && PSI->hasProfileSummary() && BFI && IsLoopNestCold(&L)) {
- LLVM_DEBUG(dbgs() << " Skip cold loop: " << L << "\n");
- return false;
- }
-
// Perform legality checks.
if (!isSafeForNoNTrivialUnswitching(L, LI))
return false;
@@ -3705,11 +3674,6 @@ PreservedAnalyses SimpleLoopUnswitchPass::run(Loop &L, LoopAnalysisManager &AM,
LPMUpdater &U) {
Function &F = *L.getHeader()->getParent();
(void)F;
- ProfileSummaryInfo *PSI = nullptr;
- if (auto OuterProxy =
- AM.getResult<FunctionAnalysisManagerLoopProxy>(L, AR)
- .getCachedResult<ModuleAnalysisManagerFunctionProxy>(F))
- PSI = OuterProxy->getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
LLVM_DEBUG(dbgs() << "Unswitching loop in " << F.getName() << ": " << L
<< "\n");
@@ -3720,7 +3684,7 @@ PreservedAnalyses SimpleLoopUnswitchPass::run(Loop &L, LoopAnalysisManager &AM,
AR.MSSA->verifyMemorySSA();
}
if (!unswitchLoop(L, AR.DT, AR.LI, AR.AC, AR.AA, AR.TTI, Trivial, NonTrivial,
- &AR.SE, MSSAU ? &*MSSAU : nullptr, PSI, AR.BFI, U))
+ &AR.SE, MSSAU ? &*MSSAU : nullptr, U))
return PreservedAnalyses::all();
if (AR.MSSA && VerifyMemorySSA)
diff --git a/llvm/lib/Transforms/Utils/SCCPSolver.cpp b/llvm/lib/Transforms/Utils/SCCPSolver.cpp
index 9693ae6..b80c3c9 100644
--- a/llvm/lib/Transforms/Utils/SCCPSolver.cpp
+++ b/llvm/lib/Transforms/Utils/SCCPSolver.cpp
@@ -634,18 +634,10 @@ private:
/// Merge \p MergeWithV into \p IV and push \p V to the worklist, if \p IV
/// changes.
bool mergeInValue(ValueLatticeElement &IV, Value *V,
- ValueLatticeElement MergeWithV,
+ const ValueLatticeElement &MergeWithV,
ValueLatticeElement::MergeOptions Opts = {
/*MayIncludeUndef=*/false, /*CheckWiden=*/false});
- bool mergeInValue(Value *V, ValueLatticeElement MergeWithV,
- ValueLatticeElement::MergeOptions Opts = {
- /*MayIncludeUndef=*/false, /*CheckWiden=*/false}) {
- assert(!V->getType()->isStructTy() &&
- "non-structs should use markConstant");
- return mergeInValue(ValueState[V], V, MergeWithV, Opts);
- }
-
/// getValueState - Return the ValueLatticeElement object that corresponds to
/// the value. This function handles the case when the value hasn't been seen
/// yet by properly seeding constants etc.
@@ -987,7 +979,7 @@ public:
void trackValueOfArgument(Argument *A) {
if (A->getType()->isStructTy())
return (void)markOverdefined(A);
- mergeInValue(A, getArgAttributeVL(A));
+ mergeInValue(ValueState[A], A, getArgAttributeVL(A));
}
bool isStructLatticeConstant(Function *F, StructType *STy);
@@ -1128,8 +1120,7 @@ bool SCCPInstVisitor::isStructLatticeConstant(Function *F, StructType *STy) {
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
const auto &It = TrackedMultipleRetVals.find(std::make_pair(F, i));
assert(It != TrackedMultipleRetVals.end());
- ValueLatticeElement LV = It->second;
- if (!SCCPSolver::isConstant(LV))
+ if (!SCCPSolver::isConstant(It->second))
return false;
}
return true;
@@ -1160,7 +1151,7 @@ Constant *SCCPInstVisitor::getConstantOrNull(Value *V) const {
std::vector<Constant *> ConstVals;
auto *ST = cast<StructType>(V->getType());
for (unsigned I = 0, E = ST->getNumElements(); I != E; ++I) {
- ValueLatticeElement LV = LVs[I];
+ const ValueLatticeElement &LV = LVs[I];
ConstVals.push_back(SCCPSolver::isConstant(LV)
? getConstant(LV, ST->getElementType(I))
: UndefValue::get(ST->getElementType(I)));
@@ -1225,7 +1216,7 @@ void SCCPInstVisitor::visitInstruction(Instruction &I) {
}
bool SCCPInstVisitor::mergeInValue(ValueLatticeElement &IV, Value *V,
- ValueLatticeElement MergeWithV,
+ const ValueLatticeElement &MergeWithV,
ValueLatticeElement::MergeOptions Opts) {
if (IV.mergeIn(MergeWithV, Opts)) {
pushUsersToWorkList(V);
@@ -1264,7 +1255,7 @@ void SCCPInstVisitor::getFeasibleSuccessors(Instruction &TI,
return;
}
- ValueLatticeElement BCValue = getValueState(BI->getCondition());
+ const ValueLatticeElement &BCValue = getValueState(BI->getCondition());
ConstantInt *CI = getConstantInt(BCValue, BI->getCondition()->getType());
if (!CI) {
// Overdefined condition variables, and branches on unfoldable constant
@@ -1326,7 +1317,7 @@ void SCCPInstVisitor::getFeasibleSuccessors(Instruction &TI,
// the target as executable.
if (auto *IBR = dyn_cast<IndirectBrInst>(&TI)) {
// Casts are folded by visitCastInst.
- ValueLatticeElement IBRValue = getValueState(IBR->getAddress());
+ const ValueLatticeElement &IBRValue = getValueState(IBR->getAddress());
BlockAddress *Addr = dyn_cast_or_null<BlockAddress>(
getConstant(IBRValue, IBR->getAddress()->getType()));
if (!Addr) { // Overdefined or unknown condition?
@@ -1408,7 +1399,7 @@ void SCCPInstVisitor::visitPHINode(PHINode &PN) {
if (!isEdgeFeasible(PN.getIncomingBlock(i), PN.getParent()))
continue;
- ValueLatticeElement IV = getValueState(PN.getIncomingValue(i));
+ const ValueLatticeElement &IV = getValueState(PN.getIncomingValue(i));
PhiState.mergeIn(IV);
NumActiveIncoming++;
if (PhiState.isOverdefined())
@@ -1420,10 +1411,10 @@ void SCCPInstVisitor::visitPHINode(PHINode &PN) {
// extensions to match the number of active incoming values. This helps to
// limit multiple extensions caused by the same incoming value, if other
// incoming values are equal.
- mergeInValue(&PN, PhiState,
+ ValueLatticeElement &PhiStateRef = ValueState[&PN];
+ mergeInValue(PhiStateRef, &PN, PhiState,
ValueLatticeElement::MergeOptions().setMaxWidenSteps(
NumActiveIncoming + 1));
- ValueLatticeElement &PhiStateRef = getValueState(&PN);
PhiStateRef.setNumRangeExtensions(
std::max(NumActiveIncoming, PhiStateRef.getNumRangeExtensions()));
}
@@ -1481,7 +1472,7 @@ void SCCPInstVisitor::visitCastInst(CastInst &I) {
}
}
- ValueLatticeElement OpSt = getValueState(I.getOperand(0));
+ const ValueLatticeElement &OpSt = getValueState(I.getOperand(0));
if (OpSt.isUnknownOrUndef())
return;
@@ -1496,9 +1487,9 @@ void SCCPInstVisitor::visitCastInst(CastInst &I) {
if (I.getDestTy()->isIntOrIntVectorTy() &&
I.getSrcTy()->isIntOrIntVectorTy() &&
I.getOpcode() != Instruction::BitCast) {
- auto &LV = getValueState(&I);
ConstantRange OpRange =
OpSt.asConstantRange(I.getSrcTy(), /*UndefAllowed=*/false);
+ auto &LV = getValueState(&I);
Type *DestTy = I.getDestTy();
ConstantRange Res = ConstantRange::getEmpty(DestTy->getScalarSizeInBits());
@@ -1516,19 +1507,24 @@ void SCCPInstVisitor::handleExtractOfWithOverflow(ExtractValueInst &EVI,
const WithOverflowInst *WO,
unsigned Idx) {
Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
- ValueLatticeElement L = getValueState(LHS);
- ValueLatticeElement R = getValueState(RHS);
+ Type *Ty = LHS->getType();
+
addAdditionalUser(LHS, &EVI);
addAdditionalUser(RHS, &EVI);
- if (L.isUnknownOrUndef() || R.isUnknownOrUndef())
- return; // Wait to resolve.
- Type *Ty = LHS->getType();
+ const ValueLatticeElement &L = getValueState(LHS);
+ if (L.isUnknownOrUndef())
+ return; // Wait to resolve.
ConstantRange LR = L.asConstantRange(Ty, /*UndefAllowed=*/false);
+
+ const ValueLatticeElement &R = getValueState(RHS);
+ if (R.isUnknownOrUndef())
+ return; // Wait to resolve.
+
ConstantRange RR = R.asConstantRange(Ty, /*UndefAllowed=*/false);
if (Idx == 0) {
ConstantRange Res = LR.binaryOp(WO->getBinaryOp(), RR);
- mergeInValue(&EVI, ValueLatticeElement::getRange(Res));
+ mergeInValue(ValueState[&EVI], &EVI, ValueLatticeElement::getRange(Res));
} else {
assert(Idx == 1 && "Index can only be 0 or 1");
ConstantRange NWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
@@ -1560,7 +1556,7 @@ void SCCPInstVisitor::visitExtractValueInst(ExtractValueInst &EVI) {
if (auto *WO = dyn_cast<WithOverflowInst>(AggVal))
return handleExtractOfWithOverflow(EVI, WO, i);
ValueLatticeElement EltVal = getStructValueState(AggVal, i);
- mergeInValue(getValueState(&EVI), &EVI, EltVal);
+ mergeInValue(ValueState[&EVI], &EVI, EltVal);
} else {
// Otherwise, must be extracting from an array.
return (void)markOverdefined(&EVI);
@@ -1616,14 +1612,18 @@ void SCCPInstVisitor::visitSelectInst(SelectInst &I) {
if (ValueState[&I].isOverdefined())
return (void)markOverdefined(&I);
- ValueLatticeElement CondValue = getValueState(I.getCondition());
+ const ValueLatticeElement &CondValue = getValueState(I.getCondition());
if (CondValue.isUnknownOrUndef())
return;
if (ConstantInt *CondCB =
getConstantInt(CondValue, I.getCondition()->getType())) {
Value *OpVal = CondCB->isZero() ? I.getFalseValue() : I.getTrueValue();
- mergeInValue(&I, getValueState(OpVal));
+ const ValueLatticeElement &OpValState = getValueState(OpVal);
+    // Safety: ValueState[&I] does not invalidate OpValState, since &I is
+    // already in the map and the lookup therefore cannot insert and rehash.
+ assert(ValueState.contains(&I) && "&I is not in ValueState map.");
+ mergeInValue(ValueState[&I], &I, OpValState);
return;
}
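
The ordering rule behind that comment applies throughout this change: DenseMap::operator[] may insert and grow the table, which invalidates previously obtained references, so a reference such as OpValState is only safe to hold across ValueState[&I] when &I is already present. A small illustration of the safe shape with a toy map (a sketch, not solver code):

#include "llvm/ADT/DenseMap.h"
#include <cassert>
using namespace llvm;

static void copyValue(DenseMap<int, int> &State, int DstKey, int SrcKey) {
  // Unsafe shape: int &Dst = State[DstKey]; int &Src = State[SrcKey];
  // the second operator[] may rehash and leave Dst dangling.
  int Src = State.lookup(SrcKey);  // copy out, keep no reference
  assert(State.contains(DstKey) && "DstKey must already be in the map");
  State[DstKey] = Src;             // key is present: no insertion, no rehash
}

int main() {
  DenseMap<int, int> State;
  State[1] = 10;
  State[2] = 20;
  copyValue(State, 1, 2);
  assert(State.lookup(1) == 20);
  return 0;
}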
@@ -1721,7 +1721,7 @@ void SCCPInstVisitor::visitBinaryOperator(Instruction &I) {
// being a special floating value.
ValueLatticeElement NewV;
NewV.markConstant(C, /*MayIncludeUndef=*/true);
- return (void)mergeInValue(&I, NewV);
+ return (void)mergeInValue(ValueState[&I], &I, NewV);
}
}
@@ -1741,7 +1741,7 @@ void SCCPInstVisitor::visitBinaryOperator(Instruction &I) {
R = A.overflowingBinaryOp(BO->getOpcode(), B, OBO->getNoWrapKind());
else
R = A.binaryOp(BO->getOpcode(), B);
- mergeInValue(&I, ValueLatticeElement::getRange(R));
+ mergeInValue(ValueState[&I], &I, ValueLatticeElement::getRange(R));
// TODO: Currently we do not exploit special values that produce something
// better than overdefined with an overdefined operand for vector or floating
@@ -1767,7 +1767,7 @@ void SCCPInstVisitor::visitCmpInst(CmpInst &I) {
if (C) {
ValueLatticeElement CV;
CV.markConstant(C);
- mergeInValue(&I, CV);
+ mergeInValue(ValueState[&I], &I, CV);
return;
}
@@ -1802,7 +1802,7 @@ void SCCPInstVisitor::visitGetElementPtrInst(GetElementPtrInst &I) {
Operands.reserve(I.getNumOperands());
for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
- ValueLatticeElement State = getValueState(I.getOperand(i));
+ const ValueLatticeElement &State = getValueState(I.getOperand(i));
if (State.isUnknownOrUndef())
return; // Operands are not resolved yet.
@@ -1881,14 +1881,13 @@ void SCCPInstVisitor::visitLoadInst(LoadInst &I) {
if (ValueState[&I].isOverdefined())
return (void)markOverdefined(&I);
- ValueLatticeElement PtrVal = getValueState(I.getOperand(0));
+ const ValueLatticeElement &PtrVal = getValueState(I.getOperand(0));
if (PtrVal.isUnknownOrUndef())
return; // The pointer is not resolved yet!
- ValueLatticeElement &IV = ValueState[&I];
-
if (SCCPSolver::isConstant(PtrVal)) {
Constant *Ptr = getConstant(PtrVal, I.getOperand(0)->getType());
+ ValueLatticeElement &IV = ValueState[&I];
// load null is undefined.
if (isa<ConstantPointerNull>(Ptr)) {
@@ -1916,7 +1915,7 @@ void SCCPInstVisitor::visitLoadInst(LoadInst &I) {
}
// Fall back to metadata.
- mergeInValue(&I, getValueFromMetadata(&I));
+ mergeInValue(ValueState[&I], &I, getValueFromMetadata(&I));
}
void SCCPInstVisitor::visitCallBase(CallBase &CB) {
@@ -1944,7 +1943,7 @@ void SCCPInstVisitor::handleCallOverdefined(CallBase &CB) {
return markOverdefined(&CB); // Can't handle struct args.
if (A.get()->getType()->isMetadataTy())
continue; // Carried in CB, not allowed in Operands.
- ValueLatticeElement State = getValueState(A);
+ const ValueLatticeElement &State = getValueState(A);
if (State.isUnknownOrUndef())
return; // Operands are not resolved yet.
@@ -1964,7 +1963,7 @@ void SCCPInstVisitor::handleCallOverdefined(CallBase &CB) {
}
// Fall back to metadata.
- mergeInValue(&CB, getValueFromMetadata(&CB));
+ mergeInValue(ValueState[&CB], &CB, getValueFromMetadata(&CB));
}
void SCCPInstVisitor::handleCallArguments(CallBase &CB) {
@@ -1992,10 +1991,11 @@ void SCCPInstVisitor::handleCallArguments(CallBase &CB) {
mergeInValue(getStructValueState(&*AI, i), &*AI, CallArg,
getMaxWidenStepsOpts());
}
- } else
- mergeInValue(&*AI,
- getValueState(*CAI).intersect(getArgAttributeVL(&*AI)),
- getMaxWidenStepsOpts());
+ } else {
+ ValueLatticeElement CallArg =
+ getValueState(*CAI).intersect(getArgAttributeVL(&*AI));
+ mergeInValue(ValueState[&*AI], &*AI, CallArg, getMaxWidenStepsOpts());
+ }
}
}
}
@@ -2076,7 +2076,8 @@ void SCCPInstVisitor::handleCallResult(CallBase &CB) {
if (II->getIntrinsicID() == Intrinsic::vscale) {
unsigned BitWidth = CB.getType()->getScalarSizeInBits();
const ConstantRange Result = getVScaleRange(II->getFunction(), BitWidth);
- return (void)mergeInValue(II, ValueLatticeElement::getRange(Result));
+ return (void)mergeInValue(ValueState[II], II,
+ ValueLatticeElement::getRange(Result));
}
if (ConstantRange::isIntrinsicSupported(II->getIntrinsicID())) {
@@ -2094,7 +2095,8 @@ void SCCPInstVisitor::handleCallResult(CallBase &CB) {
ConstantRange Result =
ConstantRange::intrinsic(II->getIntrinsicID(), OpRanges);
- return (void)mergeInValue(II, ValueLatticeElement::getRange(Result));
+ return (void)mergeInValue(ValueState[II], II,
+ ValueLatticeElement::getRange(Result));
}
}
@@ -2121,7 +2123,7 @@ void SCCPInstVisitor::handleCallResult(CallBase &CB) {
return handleCallOverdefined(CB); // Not tracking this callee.
// If so, propagate the return value of the callee into this call result.
- mergeInValue(&CB, TFRVI->second, getMaxWidenStepsOpts());
+ mergeInValue(ValueState[&CB], &CB, TFRVI->second, getMaxWidenStepsOpts());
}
}
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 88af2cf..9cd52da 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -2242,8 +2242,49 @@ public:
/// may not be necessary.
bool isLoadCombineCandidate(ArrayRef<Value *> Stores) const;
bool isStridedLoad(ArrayRef<Value *> PointerOps, Type *ScalarTy,
- Align Alignment, const int64_t Diff, Value *Ptr0,
- Value *PtrN, StridedPtrInfo &SPtrInfo) const;
+ Align Alignment, const int64_t Diff,
+ const size_t Sz) const;
+
+ /// Return true if an array of scalar loads can be replaced with a strided
+ /// load (with constant stride).
+ ///
+ /// TODO:
+ /// It is possible that the load gets "widened". Suppose that originally each
+ /// load loads `k` bytes and `PointerOps` can be arranged as follows (`%s` is
+  /// constant):
+  /// %b + 0 * %s + 0
+  /// %b + 0 * %s + 1
+  /// %b + 0 * %s + 2
+ /// ...
+ /// %b + 0 * %s + (w - 1)
+ ///
+ /// %b + 1 * %s + 0
+ /// %b + 1 * %s + 1
+ /// %b + 1 * %s + 2
+ /// ...
+ /// %b + 1 * %s + (w - 1)
+ /// ...
+ ///
+ /// %b + (n - 1) * %s + 0
+ /// %b + (n - 1) * %s + 1
+ /// %b + (n - 1) * %s + 2
+ /// ...
+ /// %b + (n - 1) * %s + (w - 1)
+ ///
+ /// In this case we will generate a strided load of type `<n x (k * w)>`.
+ ///
+ /// \param PointerOps list of pointer arguments of loads.
+ /// \param ElemTy original scalar type of loads.
+ /// \param Alignment alignment of the first load.
+  /// \param SortedIndices is the order of PointerOps as returned by
+  /// `sortPtrAccesses`.
+  /// \param Diff Pointer difference between the lowest and the highest pointer
+  /// in `PointerOps` as returned by `getPointersDiff`.
+  /// \param Ptr0 first pointer in `PointerOps`.
+  /// \param PtrN last pointer in `PointerOps`.
+  /// \param SPtrInfo If the function returns `true`, it also sets all the
+  /// fields of `SPtrInfo` necessary to generate the strided load later.
+ bool analyzeConstantStrideCandidate(
+ const ArrayRef<Value *> PointerOps, Type *ElemTy, Align Alignment,
+ const SmallVectorImpl<unsigned> &SortedIndices, const int64_t Diff,
+ Value *Ptr0, Value *PtrN, StridedPtrInfo &SPtrInfo) const;
/// Return true if an array of scalar loads can be replaced with a strided
/// load (with run-time stride).
@@ -6849,9 +6890,8 @@ isMaskedLoadCompress(ArrayRef<Value *> VL, ArrayRef<Value *> PointerOps,
/// current graph (for masked gathers extra extractelement instructions
/// might be required).
bool BoUpSLP::isStridedLoad(ArrayRef<Value *> PointerOps, Type *ScalarTy,
- Align Alignment, const int64_t Diff, Value *Ptr0,
- Value *PtrN, StridedPtrInfo &SPtrInfo) const {
- const size_t Sz = PointerOps.size();
+ Align Alignment, const int64_t Diff,
+ const size_t Sz) const {
if (Diff % (Sz - 1) != 0)
return false;
@@ -6875,27 +6915,40 @@ bool BoUpSLP::isStridedLoad(ArrayRef<Value *> PointerOps, Type *ScalarTy,
return false;
if (!TTI->isLegalStridedLoadStore(VecTy, Alignment))
return false;
+ return true;
+ }
+ return false;
+}
- // Iterate through all pointers and check if all distances are
- // unique multiple of Dist.
- SmallSet<int64_t, 4> Dists;
- for (Value *Ptr : PointerOps) {
- int64_t Dist = 0;
- if (Ptr == PtrN)
- Dist = Diff;
- else if (Ptr != Ptr0)
- Dist = *getPointersDiff(ScalarTy, Ptr0, ScalarTy, Ptr, *DL, *SE);
- // If the strides are not the same or repeated, we can't
- // vectorize.
- if (((Dist / Stride) * Stride) != Dist || !Dists.insert(Dist).second)
- break;
- }
- if (Dists.size() == Sz) {
- Type *StrideTy = DL->getIndexType(Ptr0->getType());
- SPtrInfo.StrideVal = ConstantInt::get(StrideTy, Stride);
- SPtrInfo.Ty = getWidenedType(ScalarTy, Sz);
- return true;
- }
+bool BoUpSLP::analyzeConstantStrideCandidate(
+ const ArrayRef<Value *> PointerOps, Type *ScalarTy, Align Alignment,
+ const SmallVectorImpl<unsigned> &SortedIndices, const int64_t Diff,
+ Value *Ptr0, Value *PtrN, StridedPtrInfo &SPtrInfo) const {
+ const size_t Sz = PointerOps.size();
+ if (!isStridedLoad(PointerOps, ScalarTy, Alignment, Diff, Sz))
+ return false;
+
+ int64_t Stride = Diff / static_cast<int64_t>(Sz - 1);
+
+ // Iterate through all pointers and check if all distances are
+ // unique multiple of Dist.
+ SmallSet<int64_t, 4> Dists;
+ for (Value *Ptr : PointerOps) {
+ int64_t Dist = 0;
+ if (Ptr == PtrN)
+ Dist = Diff;
+ else if (Ptr != Ptr0)
+ Dist = *getPointersDiff(ScalarTy, Ptr0, ScalarTy, Ptr, *DL, *SE);
+ // If the strides are not the same or repeated, we can't
+ // vectorize.
+ if (((Dist / Stride) * Stride) != Dist || !Dists.insert(Dist).second)
+ break;
+ }
+ if (Dists.size() == Sz) {
+ Type *StrideTy = DL->getIndexType(Ptr0->getType());
+ SPtrInfo.StrideVal = ConstantInt::get(StrideTy, Stride);
+ SPtrInfo.Ty = getWidenedType(ScalarTy, Sz);
+ return true;
}
return false;
}
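
The second half of analyzeConstantStrideCandidate re-derives each pointer's distance from Ptr0 (via getPointersDiff) and accepts the group only when those distances are distinct multiples of the stride; since there are Sz of them between the lowest and highest pointer, that forces exactly 0, Stride, ..., Diff. A standalone sketch of that acceptance test over plain integer offsets (illustrative only):

#include <cassert>
#include <cstdint>
#include <set>
#include <vector>

// Accept pointer offsets (relative to the first pointer) as a constant-stride
// group only if every offset is a distinct multiple of Stride.
static bool isConstantStrideGroup(const std::vector<int64_t> &Dists,
                                  int64_t Stride) {
  std::set<int64_t> Seen;
  for (int64_t Dist : Dists)
    if (Dist % Stride != 0 || !Seen.insert(Dist).second)
      return false;
  return true;
}

int main() {
  // 4 loads at %b, %b+8, %b+16, %b+24: Diff = 24, Stride = 24 / (4 - 1) = 8.
  assert(isConstantStrideGroup({0, 8, 16, 24}, 8));
  // A repeated offset (two loads from the same address) is rejected.
  assert(!isConstantStrideGroup({0, 8, 8, 24}, 8));
  // An offset that is not a multiple of the stride is rejected.
  assert(!isConstantStrideGroup({0, 8, 12, 24}, 8));
  return 0;
}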
@@ -6995,8 +7048,8 @@ BoUpSLP::LoadsState BoUpSLP::canVectorizeLoads(
Align Alignment =
cast<LoadInst>(Order.empty() ? VL.front() : VL[Order.front()])
->getAlign();
- if (isStridedLoad(PointerOps, ScalarTy, Alignment, *Diff, Ptr0, PtrN,
- SPtrInfo))
+ if (analyzeConstantStrideCandidate(PointerOps, ScalarTy, Alignment, Order,
+ *Diff, Ptr0, PtrN, SPtrInfo))
return LoadsState::StridedVectorize;
}
if (!TTI->isLegalMaskedGather(VecTy, CommonAlignment) ||
@@ -17632,7 +17685,9 @@ void BoUpSLP::setInsertPointAfterBundle(const TreeEntry *E) {
}
if (IsPHI ||
(!E->isGather() && E->State != TreeEntry::SplitVectorize &&
- E->doesNotNeedToSchedule()) ||
+ (E->doesNotNeedToSchedule() ||
+ (E->hasCopyableElements() && !E->isCopyableElement(LastInst) &&
+ isUsedOutsideBlock(LastInst)))) ||
(GatheredLoadsEntriesFirst.has_value() &&
E->Idx >= *GatheredLoadsEntriesFirst && !E->isGather() &&
E->getOpcode() == Instruction::Load)) {
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index 0e0b042..84d2ea6 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -407,6 +407,10 @@ public:
VPBasicBlock *getParent() { return Parent; }
const VPBasicBlock *getParent() const { return Parent; }
+ /// \return the VPRegionBlock which the recipe belongs to.
+ VPRegionBlock *getRegion();
+ const VPRegionBlock *getRegion() const;
+
/// The method which generates the output IR instructions that correspond to
/// this VPRecipe, thereby "executing" the VPlan.
virtual void execute(VPTransformState &State) = 0;
@@ -4075,6 +4079,14 @@ public:
}
};
+inline VPRegionBlock *VPRecipeBase::getRegion() {
+ return getParent()->getParent();
+}
+
+inline const VPRegionBlock *VPRecipeBase::getRegion() const {
+ return getParent()->getParent();
+}
+
/// VPlan models a candidate for vectorization, encoding various decisions take
/// to produce efficient output IR, including which branches, basic-blocks and
/// output IR instructions to generate, and their cost. VPlan holds a
diff --git a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp
index f413c63..7e074c1 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp
@@ -377,7 +377,7 @@ bool VPDominatorTree::properlyDominates(const VPRecipeBase *A,
#ifndef NDEBUG
auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * {
- auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent());
+ VPRegionBlock *Region = R->getRegion();
if (Region && Region->isReplicator()) {
assert(Region->getNumSuccessors() == 1 &&
Region->getNumPredecessors() == 1 && "Expected SESE region!");
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index 7a98c75..d1e67e6b 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -2352,7 +2352,7 @@ bool VPWidenIntOrFpInductionRecipe::isCanonical() const {
return false;
auto *StepC = dyn_cast<ConstantInt>(getStepValue()->getLiveInIRValue());
auto *StartC = dyn_cast<ConstantInt>(getStartValue()->getLiveInIRValue());
- auto *CanIV = getParent()->getParent()->getCanonicalIV();
+ auto *CanIV = getRegion()->getCanonicalIV();
return StartC && StartC->isZero() && StepC && StepC->isOne() &&
getScalarType() == CanIV->getScalarType();
}
@@ -3076,7 +3076,7 @@ static void scalarizeInstruction(const Instruction *Instr,
State.AC->registerAssumption(II);
assert(
- (RepRecipe->getParent()->getParent() ||
+ (RepRecipe->getRegion() ||
!RepRecipe->getParent()->getPlan()->getVectorLoopRegion() ||
all_of(RepRecipe->operands(),
[](VPValue *Op) { return Op->isDefinedOutsideLoopRegions(); })) &&
@@ -3268,7 +3268,7 @@ InstructionCost VPReplicateRecipe::computeCost(ElementCount VF,
to_vector(operands()), VF);
// If the recipe is not predicated (i.e. not in a replicate region), return
// the scalar cost. Otherwise handle predicated cost.
- if (!getParent()->getParent()->isReplicator())
+ if (!getRegion()->isReplicator())
return ScalarCost;
// Account for the phi nodes that we will create.
@@ -3284,7 +3284,7 @@ InstructionCost VPReplicateRecipe::computeCost(ElementCount VF,
case Instruction::Store: {
// TODO: See getMemInstScalarizationCost for how to handle replicating and
// predicated cases.
- const VPRegionBlock *ParentRegion = getParent()->getParent();
+ const VPRegionBlock *ParentRegion = getRegion();
if (ParentRegion && ParentRegion->isReplicator())
break;
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index cae9aee8..f5f616f 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -1858,8 +1858,8 @@ static bool hoistPreviousBeforeFORUsers(VPFirstOrderRecurrencePHIRecipe *FOR,
return nullptr;
VPRegionBlock *EnclosingLoopRegion =
HoistCandidate->getParent()->getEnclosingLoopRegion();
- assert((!HoistCandidate->getParent()->getParent() ||
- HoistCandidate->getParent()->getParent() == EnclosingLoopRegion) &&
+ assert((!HoistCandidate->getRegion() ||
+ HoistCandidate->getRegion() == EnclosingLoopRegion) &&
"CFG in VPlan should still be flat, without replicate regions");
// Hoist candidate was already visited, no need to hoist.
if (!Visited.insert(HoistCandidate).second)
@@ -2898,7 +2898,7 @@ void VPlanTransforms::replaceSymbolicStrides(
// evolution.
auto CanUseVersionedStride = [&Plan](VPUser &U, unsigned) {
auto *R = cast<VPRecipeBase>(&U);
- return R->getParent()->getParent() ||
+ return R->getRegion() ||
R->getParent() == Plan.getVectorLoopRegion()->getSinglePredecessor();
};
ValueToSCEVMapTy RewriteMap;
@@ -3803,8 +3803,7 @@ void VPlanTransforms::materializeBuildVectors(VPlan &Plan) {
continue;
auto *DefR = cast<VPRecipeWithIRFlags>(&R);
auto UsesVectorOrInsideReplicateRegion = [DefR, LoopRegion](VPUser *U) {
- VPRegionBlock *ParentRegion =
- cast<VPRecipeBase>(U)->getParent()->getParent();
+ VPRegionBlock *ParentRegion = cast<VPRecipeBase>(U)->getRegion();
return !U->usesScalars(DefR) || ParentRegion != LoopRegion;
};
if ((isa<VPReplicateRecipe>(DefR) &&
diff --git a/llvm/lib/Transforms/Vectorize/VPlanUtils.h b/llvm/lib/Transforms/Vectorize/VPlanUtils.h
index cf95ac0..9a2497e 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanUtils.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanUtils.h
@@ -64,7 +64,7 @@ inline bool isSingleScalar(const VPValue *VPV) {
return true;
if (auto *Rep = dyn_cast<VPReplicateRecipe>(VPV)) {
- const VPRegionBlock *RegionOfR = Rep->getParent()->getParent();
+ const VPRegionBlock *RegionOfR = Rep->getRegion();
// Don't consider recipes in replicate regions as uniform yet; their first
// lane cannot be accessed when executing the replicate region for other
// lanes.
diff --git a/llvm/test/Bindings/llvm-c/debug_info_new_format.ll b/llvm/test/Bindings/llvm-c/debug_info_new_format.ll
index 83b37da..75e5fa0 100644
--- a/llvm/test/Bindings/llvm-c/debug_info_new_format.ll
+++ b/llvm/test/Bindings/llvm-c/debug_info_new_format.ll
@@ -3,37 +3,37 @@
; CHECK: ; ModuleID = 'debuginfo.c'
; CHECK-NEXT: source_filename = "debuginfo.c"
-
-; CHECK: define i64 @foo(i64 %0, i64 %1, <10 x i64> %2) !dbg !44 {
+
+; CHECK: define i64 @foo(i64 %0, i64 %1, <10 x i64> %2) !dbg !45 {
; CHECK-NEXT: entry:
-; CHECK-NEXT: #dbg_declare(i64 0, !49, !DIExpression(), !58)
-; CHECK-NEXT: #dbg_declare(i64 0, !50, !DIExpression(), !58)
-; CHECK-NEXT: #dbg_declare(i64 0, !51, !DIExpression(), !58)
-; CHECK-NEXT: #dbg_label(!59, !58)
+; CHECK-NEXT: #dbg_declare(i64 0, !50, !DIExpression(), !59)
+; CHECK-NEXT: #dbg_declare(i64 0, !51, !DIExpression(), !59)
+; CHECK-NEXT: #dbg_declare(i64 0, !52, !DIExpression(), !59)
+; CHECK-NEXT: #dbg_label(!60, !59)
; CHECK-NEXT: br label %vars
-; CHECK-NEXT: #dbg_label(!60, !58)
+; CHECK-NEXT: #dbg_label(!61, !59)
; CHECK-NEXT: br label %vars
; CHECK: vars: ; preds = %entry, %entry
; CHECK-NEXT: %p1 = phi i64 [ 0, %entry ]
; CHECK-NEXT: %p2 = phi i64 [ 0, %entry ]
-; CHECK-NEXT: #dbg_value(i64 0, !42, !DIExpression(DW_OP_constu, 0, DW_OP_stack_value), !61)
-; CHECK-NEXT: #dbg_value(i64 1, !52, !DIExpression(DW_OP_constu, 1, DW_OP_stack_value), !61)
+; CHECK-NEXT: #dbg_value(i64 0, !43, !DIExpression(DW_OP_constu, 0, DW_OP_stack_value), !62)
+; CHECK-NEXT: #dbg_value(i64 1, !53, !DIExpression(DW_OP_constu, 1, DW_OP_stack_value), !62)
; CHECK-NEXT: %a = add i64 %p1, %p2
; CHECK-NEXT: ret i64 0
; CHECK-NEXT: }
; CHECK: !llvm.dbg.cu = !{!0}
-; CHECK-NEXT: !FooType = !{!33}
+; CHECK-NEXT: !FooType = !{!34}
; CHECK-NEXT: !EnumTest = !{!3}
; CHECK-NEXT: !LargeEnumTest = !{!11}
-; CHECK-NEXT: !SubrangeType = !{!36}
-; CHECK-NEXT: !SetType1 = !{!37}
-; CHECK-NEXT: !SetType2 = !{!38}
-; CHECK-NEXT: !DynType = !{!39}
-; CHECK-NEXT: !ClassType = !{!54}
+; CHECK-NEXT: !SubrangeType = !{!37}
+; CHECK-NEXT: !SetType1 = !{!38}
+; CHECK-NEXT: !SetType2 = !{!39}
+; CHECK-NEXT: !DynType = !{!40}
+; CHECK-NEXT: !ClassType = !{!55}
-; CHECK: !0 = distinct !DICompileUnit(language: DW_LANG_C, file: !1, producer: "llvm-c-test", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, globals: !16, imports: !24, macros: !28, splitDebugInlining: false, sysroot: "/")
+; CHECK: !0 = distinct !DICompileUnit(language: DW_LANG_C, file: !1, producer: "llvm-c-test", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, globals: !16, imports: !24, macros: !29, splitDebugInlining: false, sysroot: "/")
; CHECK-NEXT: !1 = !DIFile(filename: "debuginfo.c", directory: ".")
; CHECK-NEXT: !2 = !{!3, !11}
; CHECK-NEXT: !3 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "EnumTest", scope: !4, file: !1, baseType: !6, size: 64, elements: !7)
@@ -57,41 +57,42 @@
; CHECK-NEXT: !21 = !DIGlobalVariableExpression(var: !22, expr: !DIExpression(DW_OP_constu, 0, DW_OP_stack_value))
; CHECK-NEXT: !22 = distinct !DIGlobalVariable(name: "global", scope: !5, file: !1, line: 1, type: !23, isLocal: true, isDefinition: true)
; CHECK-NEXT: !23 = !DIDerivedType(tag: DW_TAG_typedef, name: "int64_t", scope: !1, file: !1, line: 42, baseType: !6)
-; CHECK-NEXT: !24 = !{!25, !27}
-; CHECK-NEXT: !25 = !DIImportedEntity(tag: DW_TAG_imported_module, scope: !5, entity: !26, file: !1, line: 42)
+; CHECK-NEXT: !24 = !{!25, !28}
+; CHECK-NEXT: !25 = !DIImportedEntity(tag: DW_TAG_imported_module, scope: !5, entity: !26, file: !27, line: 42)
; CHECK-NEXT: !26 = !DIModule(scope: null, name: "llvm-c-test-import", includePath: "/test/include/llvm-c-test-import.h")
-; CHECK-NEXT: !27 = !DIImportedEntity(tag: DW_TAG_imported_module, scope: !5, entity: !25, file: !1, line: 42)
-; CHECK-NEXT: !28 = !{!29}
-; CHECK-NEXT: !29 = !DIMacroFile(file: !1, nodes: !30)
-; CHECK-NEXT: !30 = !{!31, !32}
-; CHECK-NEXT: !31 = !DIMacro(type: DW_MACINFO_define, name: "SIMPLE_DEFINE")
-; CHECK-NEXT: !32 = !DIMacro(type: DW_MACINFO_define, name: "VALUE_DEFINE", value: "1")
-; CHECK-NEXT: !33 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !34, size: 192, dwarfAddressSpace: 0)
-; CHECK-NEXT: !34 = !DICompositeType(tag: DW_TAG_structure_type, name: "MyStruct", scope: !4, file: !1, size: 192, elements: !35, runtimeLang: DW_LANG_C89, identifier: "MyStruct")
-; CHECK-NEXT: !35 = !{!6, !6, !6}
-; CHECK-NEXT: !36 = !DISubrangeType(name: "foo", scope: !1, file: !1, line: 42, size: 64, baseType: !6, lowerBound: i64 0, upperBound: i64 1, stride: i64 8, bias: i64 4)
-; CHECK-NEXT: !37 = !DIDerivedType(tag: DW_TAG_set_type, name: "enumset", scope: !1, file: !1, line: 42, baseType: !3, size: 64)
-; CHECK-NEXT: !38 = !DIDerivedType(tag: DW_TAG_set_type, name: "subrangeset", scope: !1, file: !1, line: 42, baseType: !36, size: 64)
-; CHECK-NEXT: !39 = !DICompositeType(tag: DW_TAG_array_type, name: "foo", scope: !1, file: !1, line: 42, baseType: !6, size: 640, elements: !40, dataLocation: !DIExpression(), associated: !42, rank: !DIExpression())
-; CHECK-NEXT: !40 = !{!41}
-; CHECK-NEXT: !41 = !DISubrange(count: 10, lowerBound: 0)
-; CHECK-NEXT: !42 = !DILocalVariable(name: "d", scope: !43, file: !1, line: 43, type: !6)
-; CHECK-NEXT: !43 = distinct !DILexicalBlock(scope: !44, file: !1, line: 42)
-; CHECK-NEXT: !44 = distinct !DISubprogram(name: "foo", linkageName: "foo", scope: !1, file: !1, line: 42, type: !45, scopeLine: 42, spFlags: DISPFlagLocalToUnit | DISPFlagDefinition, unit: !0, retainedNodes: !48)
-; CHECK-NEXT: !45 = !DISubroutineType(types: !46)
-; CHECK-NEXT: !46 = !{!6, !6, !47}
-; CHECK-NEXT: !47 = !DICompositeType(tag: DW_TAG_array_type, baseType: !6, size: 640, flags: DIFlagVector, elements: !40)
-; CHECK-NEXT: !48 = !{!49, !50, !51, !42, !52, !53}
-; CHECK-NEXT: !49 = !DILocalVariable(name: "a", arg: 1, scope: !44, file: !1, line: 42, type: !6)
-; CHECK-NEXT: !50 = !DILocalVariable(name: "b", arg: 2, scope: !44, file: !1, line: 42, type: !6)
-; CHECK-NEXT: !51 = !DILocalVariable(name: "c", arg: 3, scope: !44, file: !1, line: 42, type: !47)
-; CHECK-NEXT: !52 = !DILocalVariable(name: "e", scope: !43, file: !1, line: 44, type: !6)
-; CHECK-NEXT: !53 = !DILabel(scope: !44, name: "label3", file: !1, line: 42)
-; CHECK-NEXT: !54 = !DICompositeType(tag: DW_TAG_class_type, name: "Class", scope: !4, file: !1, size: 192, flags: DIFlagFwdDecl, elements: !55, identifier: "FooClass")
-; CHECK-NEXT: !55 = !{!56}
-; CHECK-NEXT: !56 = !{!6, !6, !57}
-; CHECK-NEXT: !57 = !DIBasicType(name: "Int32", size: 32)
-; CHECK-NEXT: !58 = !DILocation(line: 42, scope: !44)
-; CHECK-NEXT: !59 = !DILabel(scope: !44, name: "label1", file: !1, line: 42)
-; CHECK-NEXT: !60 = !DILabel(scope: !44, name: "label2", file: !1, line: 42)
-; CHECK-NEXT: !61 = !DILocation(line: 43, scope: !44)
+; CHECK-NEXT: !27 = !DIFile(filename: "debuginfo.c", directory: ".", checksumkind: CSK_MD5, checksum: "1234", source: "source")
+; CHECK-NEXT: !28 = !DIImportedEntity(tag: DW_TAG_imported_module, scope: !5, entity: !25, file: !1, line: 42)
+; CHECK-NEXT: !29 = !{!30}
+; CHECK-NEXT: !30 = !DIMacroFile(file: !1, nodes: !31)
+; CHECK-NEXT: !31 = !{!32, !33}
+; CHECK-NEXT: !32 = !DIMacro(type: DW_MACINFO_define, name: "SIMPLE_DEFINE")
+; CHECK-NEXT: !33 = !DIMacro(type: DW_MACINFO_define, name: "VALUE_DEFINE", value: "1")
+; CHECK-NEXT: !34 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !35, size: 192, dwarfAddressSpace: 0)
+; CHECK-NEXT: !35 = !DICompositeType(tag: DW_TAG_structure_type, name: "MyStruct", scope: !4, file: !1, size: 192, elements: !36, runtimeLang: DW_LANG_C89, identifier: "MyStruct")
+; CHECK-NEXT: !36 = !{!6, !6, !6}
+; CHECK-NEXT: !37 = !DISubrangeType(name: "foo", scope: !1, file: !1, line: 42, size: 64, baseType: !6, lowerBound: i64 0, upperBound: i64 1, stride: i64 8, bias: i64 4)
+; CHECK-NEXT: !38 = !DIDerivedType(tag: DW_TAG_set_type, name: "enumset", scope: !1, file: !1, line: 42, baseType: !3, size: 64)
+; CHECK-NEXT: !39 = !DIDerivedType(tag: DW_TAG_set_type, name: "subrangeset", scope: !1, file: !1, line: 42, baseType: !37, size: 64)
+; CHECK-NEXT: !40 = !DICompositeType(tag: DW_TAG_array_type, name: "foo", scope: !1, file: !1, line: 42, baseType: !6, size: 640, elements: !41, dataLocation: !DIExpression(), associated: !43, rank: !DIExpression())
+; CHECK-NEXT: !41 = !{!42}
+; CHECK-NEXT: !42 = !DISubrange(count: 10, lowerBound: 0)
+; CHECK-NEXT: !43 = !DILocalVariable(name: "d", scope: !44, file: !1, line: 43, type: !6)
+; CHECK-NEXT: !44 = distinct !DILexicalBlock(scope: !45, file: !1, line: 42)
+; CHECK-NEXT: !45 = distinct !DISubprogram(name: "foo", linkageName: "foo", scope: !1, file: !1, line: 42, type: !46, scopeLine: 42, spFlags: DISPFlagLocalToUnit | DISPFlagDefinition, unit: !0, retainedNodes: !49)
+; CHECK-NEXT: !46 = !DISubroutineType(types: !47)
+; CHECK-NEXT: !47 = !{!6, !6, !48}
+; CHECK-NEXT: !48 = !DICompositeType(tag: DW_TAG_array_type, baseType: !6, size: 640, flags: DIFlagVector, elements: !41)
+; CHECK-NEXT: !49 = !{!50, !51, !52, !43, !53, !54}
+; CHECK-NEXT: !50 = !DILocalVariable(name: "a", arg: 1, scope: !45, file: !1, line: 42, type: !6)
+; CHECK-NEXT: !51 = !DILocalVariable(name: "b", arg: 2, scope: !45, file: !1, line: 42, type: !6)
+; CHECK-NEXT: !52 = !DILocalVariable(name: "c", arg: 3, scope: !45, file: !1, line: 42, type: !48)
+; CHECK-NEXT: !53 = !DILocalVariable(name: "e", scope: !44, file: !1, line: 44, type: !6)
+; CHECK-NEXT: !54 = !DILabel(scope: !45, name: "label3", file: !1, line: 42)
+; CHECK-NEXT: !55 = !DICompositeType(tag: DW_TAG_class_type, name: "Class", scope: !4, file: !1, size: 192, flags: DIFlagFwdDecl, elements: !56, identifier: "FooClass")
+; CHECK-NEXT: !56 = !{!57}
+; CHECK-NEXT: !57 = !{!6, !6, !58}
+; CHECK-NEXT: !58 = !DIBasicType(name: "Int32", size: 32)
+; CHECK-NEXT: !59 = !DILocation(line: 42, scope: !45)
+; CHECK-NEXT: !60 = !DILabel(scope: !45, name: "label1", file: !1, line: 42)
+; CHECK-NEXT: !61 = !DILabel(scope: !45, name: "label2", file: !1, line: 42)
+; CHECK-NEXT: !62 = !DILocation(line: 43, scope: !45)
diff --git a/llvm/test/CodeGen/AMDGPU/abs_i16.ll b/llvm/test/CodeGen/AMDGPU/abs_i16.ll
index 7633ba0..66cc7f3 100644
--- a/llvm/test/CodeGen/AMDGPU/abs_i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/abs_i16.ll
@@ -15,7 +15,7 @@ define i16 @abs_i16(i16 %arg) {
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 16
; GFX6-NEXT: v_sub_i32_e32 v1, vcc, 0, v0
-; GFX6-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX6-NEXT: v_max_i32_e32 v0, v1, v0
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: abs_i16:
@@ -23,7 +23,7 @@ define i16 @abs_i16(i16 %arg) {
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
; GFX7-NEXT: v_sub_i32_e32 v1, vcc, 0, v0
-; GFX7-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX7-NEXT: v_max_i32_e32 v0, v1, v0
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: abs_i16:
@@ -97,9 +97,9 @@ define <2 x i16> @v_abs_v2i16(<2 x i16> %arg) {
; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 16
; GFX6-NEXT: v_bfe_i32 v1, v1, 0, 16
; GFX6-NEXT: v_sub_i32_e32 v2, vcc, 0, v0
-; GFX6-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX6-NEXT: v_max_i32_e32 v0, v2, v0
; GFX6-NEXT: v_sub_i32_e32 v2, vcc, 0, v1
-; GFX6-NEXT: v_max_i32_e32 v1, v1, v2
+; GFX6-NEXT: v_max_i32_e32 v1, v2, v1
; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX6-NEXT: v_or_b32_e32 v0, v0, v2
; GFX6-NEXT: s_setpc_b64 s[30:31]
@@ -110,9 +110,9 @@ define <2 x i16> @v_abs_v2i16(<2 x i16> %arg) {
; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
; GFX7-NEXT: v_sub_i32_e32 v2, vcc, 0, v0
-; GFX7-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX7-NEXT: v_max_i32_e32 v0, v2, v0
; GFX7-NEXT: v_sub_i32_e32 v2, vcc, 0, v1
-; GFX7-NEXT: v_max_i32_e32 v1, v1, v2
+; GFX7-NEXT: v_max_i32_e32 v1, v2, v1
; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX7-NEXT: v_or_b32_e32 v0, v0, v2
; GFX7-NEXT: s_setpc_b64 s[30:31]
@@ -172,15 +172,15 @@ define <3 x i16> @v_abs_v3i16(<3 x i16> %arg) {
; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 16
; GFX6-NEXT: v_bfe_i32 v1, v1, 0, 16
; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v0
-; GFX6-NEXT: v_max_i32_e32 v0, v0, v3
-; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
-; GFX6-NEXT: v_max_i32_e32 v1, v1, v3
; GFX6-NEXT: v_bfe_i32 v2, v2, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v0, v3, v0
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
+; GFX6-NEXT: v_max_i32_e32 v1, v3, v1
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v2
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_max_i32_e32 v2, v3, v2
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX6-NEXT: v_sub_i32_e32 v1, vcc, 0, v2
-; GFX6-NEXT: v_max_i32_e32 v2, v2, v1
-; GFX6-NEXT: v_alignbit_b32 v1, v2, v0, 16
+; GFX6-NEXT: v_alignbit_b32 v1, v2, v1, 16
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: v_abs_v3i16:
@@ -189,15 +189,15 @@ define <3 x i16> @v_abs_v3i16(<3 x i16> %arg) {
; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v0
-; GFX7-NEXT: v_max_i32_e32 v0, v0, v3
-; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
-; GFX7-NEXT: v_max_i32_e32 v1, v1, v3
; GFX7-NEXT: v_bfe_i32 v2, v2, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v0, v3, v0
+; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
+; GFX7-NEXT: v_max_i32_e32 v1, v3, v1
+; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v2
; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_max_i32_e32 v2, v3, v2
; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7-NEXT: v_sub_i32_e32 v1, vcc, 0, v2
-; GFX7-NEXT: v_max_i32_e32 v2, v2, v1
-; GFX7-NEXT: v_alignbit_b32 v1, v2, v0, 16
+; GFX7-NEXT: v_alignbit_b32 v1, v2, v1, 16
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_abs_v3i16:
@@ -262,47 +262,45 @@ define <4 x i16> @v_abs_v4i16(<4 x i16> %arg) {
; GFX6-LABEL: v_abs_v4i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX6-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, 0, v0
; GFX6-NEXT: v_bfe_i32 v2, v2, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v0, v4, v0
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, 0, v1
; GFX6-NEXT: v_bfe_i32 v3, v3, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v1, v4, v1
; GFX6-NEXT: v_sub_i32_e32 v4, vcc, 0, v2
-; GFX6-NEXT: v_max_i32_e32 v2, v2, v4
+; GFX6-NEXT: v_max_i32_e32 v2, v4, v2
; GFX6-NEXT: v_sub_i32_e32 v4, vcc, 0, v3
-; GFX6-NEXT: v_max_i32_e32 v3, v3, v4
-; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX6-NEXT: v_bfe_i32 v1, v1, 0, 16
-; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
-; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v0
-; GFX6-NEXT: v_max_i32_e32 v0, v0, v3
-; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
-; GFX6-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX6-NEXT: v_max_i32_e32 v3, v4, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v3
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v2, v2, v4
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX6-NEXT: v_alignbit_b32 v1, v2, v0, 16
-; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX6-NEXT: v_alignbit_b32 v1, v2, v1, 16
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: v_abs_v4i16:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX7-NEXT: v_sub_i32_e32 v4, vcc, 0, v0
; GFX7-NEXT: v_bfe_i32 v2, v2, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v0, v4, v0
+; GFX7-NEXT: v_sub_i32_e32 v4, vcc, 0, v1
; GFX7-NEXT: v_bfe_i32 v3, v3, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v1, v4, v1
; GFX7-NEXT: v_sub_i32_e32 v4, vcc, 0, v2
-; GFX7-NEXT: v_max_i32_e32 v2, v2, v4
+; GFX7-NEXT: v_max_i32_e32 v2, v4, v2
; GFX7-NEXT: v_sub_i32_e32 v4, vcc, 0, v3
-; GFX7-NEXT: v_max_i32_e32 v3, v3, v4
-; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
-; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
-; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v0
-; GFX7-NEXT: v_max_i32_e32 v0, v0, v3
-; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
-; GFX7-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX7-NEXT: v_max_i32_e32 v3, v4, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v3
; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v4
; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7-NEXT: v_alignbit_b32 v1, v2, v0, 16
-; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_alignbit_b32 v1, v2, v1, 16
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_abs_v4i16:
@@ -370,63 +368,61 @@ define <6 x i16> @v_abs_v6i16(<6 x i16> %arg) {
; GFX6-LABEL: v_abs_v6i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX6-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX6-NEXT: v_sub_i32_e32 v6, vcc, 0, v0
+; GFX6-NEXT: v_bfe_i32 v4, v4, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v0, v6, v0
+; GFX6-NEXT: v_sub_i32_e32 v6, vcc, 0, v1
+; GFX6-NEXT: v_bfe_i32 v5, v5, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v1, v6, v1
+; GFX6-NEXT: v_sub_i32_e32 v6, vcc, 0, v4
+; GFX6-NEXT: v_max_i32_e32 v4, v6, v4
+; GFX6-NEXT: v_sub_i32_e32 v6, vcc, 0, v5
+; GFX6-NEXT: v_max_i32_e32 v5, v6, v5
; GFX6-NEXT: v_bfe_i32 v2, v2, 0, 16
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v5
; GFX6-NEXT: v_bfe_i32 v3, v3, 0, 16
+; GFX6-NEXT: v_or_b32_e32 v4, v4, v6
; GFX6-NEXT: v_sub_i32_e32 v6, vcc, 0, v2
-; GFX6-NEXT: v_max_i32_e32 v2, v2, v6
+; GFX6-NEXT: v_max_i32_e32 v2, v6, v2
; GFX6-NEXT: v_sub_i32_e32 v6, vcc, 0, v3
-; GFX6-NEXT: v_max_i32_e32 v3, v3, v6
-; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX6-NEXT: v_bfe_i32 v1, v1, 0, 16
-; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
-; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v0
-; GFX6-NEXT: v_max_i32_e32 v0, v0, v3
-; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
-; GFX6-NEXT: v_bfe_i32 v5, v5, 0, 16
-; GFX6-NEXT: v_max_i32_e32 v1, v1, v3
-; GFX6-NEXT: v_bfe_i32 v4, v4, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v3, v6, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v3
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v5
+; GFX6-NEXT: v_or_b32_e32 v2, v2, v6
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX6-NEXT: v_sub_i32_e32 v1, vcc, 0, v4
-; GFX6-NEXT: v_max_i32_e32 v5, v5, v3
-; GFX6-NEXT: v_max_i32_e32 v1, v4, v1
-; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v5
-; GFX6-NEXT: v_or_b32_e32 v4, v1, v3
-; GFX6-NEXT: v_alignbit_b32 v1, v2, v0, 16
-; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX6-NEXT: v_alignbit_b32 v1, v2, v1, 16
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: v_abs_v6i16:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX7-NEXT: v_sub_i32_e32 v6, vcc, 0, v0
+; GFX7-NEXT: v_bfe_i32 v4, v4, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v0, v6, v0
+; GFX7-NEXT: v_sub_i32_e32 v6, vcc, 0, v1
+; GFX7-NEXT: v_bfe_i32 v5, v5, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v1, v6, v1
+; GFX7-NEXT: v_sub_i32_e32 v6, vcc, 0, v4
+; GFX7-NEXT: v_max_i32_e32 v4, v6, v4
+; GFX7-NEXT: v_sub_i32_e32 v6, vcc, 0, v5
+; GFX7-NEXT: v_max_i32_e32 v5, v6, v5
; GFX7-NEXT: v_bfe_i32 v2, v2, 0, 16
+; GFX7-NEXT: v_lshlrev_b32_e32 v6, 16, v5
; GFX7-NEXT: v_bfe_i32 v3, v3, 0, 16
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v6
; GFX7-NEXT: v_sub_i32_e32 v6, vcc, 0, v2
-; GFX7-NEXT: v_max_i32_e32 v2, v2, v6
+; GFX7-NEXT: v_max_i32_e32 v2, v6, v2
; GFX7-NEXT: v_sub_i32_e32 v6, vcc, 0, v3
-; GFX7-NEXT: v_max_i32_e32 v3, v3, v6
-; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
-; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
-; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v0
-; GFX7-NEXT: v_max_i32_e32 v0, v0, v3
-; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
-; GFX7-NEXT: v_bfe_i32 v5, v5, 0, 16
-; GFX7-NEXT: v_max_i32_e32 v1, v1, v3
-; GFX7-NEXT: v_bfe_i32 v4, v4, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v3, v6, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v6, 16, v3
; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v5
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v6
; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7-NEXT: v_sub_i32_e32 v1, vcc, 0, v4
-; GFX7-NEXT: v_max_i32_e32 v5, v5, v3
-; GFX7-NEXT: v_max_i32_e32 v1, v4, v1
-; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v5
-; GFX7-NEXT: v_or_b32_e32 v4, v1, v3
-; GFX7-NEXT: v_alignbit_b32 v1, v2, v0, 16
-; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_alignbit_b32 v1, v2, v1, 16
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_abs_v6i16:
@@ -509,83 +505,79 @@ define <8 x i16> @v_abs_v8i16(<8 x i16> %arg) {
; GFX6-LABEL: v_abs_v8i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX6-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, 0, v0
+; GFX6-NEXT: v_bfe_i32 v4, v4, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v0, v8, v0
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, 0, v1
+; GFX6-NEXT: v_bfe_i32 v5, v5, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v1, v8, v1
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, 0, v4
; GFX6-NEXT: v_bfe_i32 v6, v6, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v4, v8, v4
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, 0, v5
; GFX6-NEXT: v_bfe_i32 v7, v7, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v5, v8, v5
; GFX6-NEXT: v_sub_i32_e32 v8, vcc, 0, v6
-; GFX6-NEXT: v_max_i32_e32 v6, v6, v8
+; GFX6-NEXT: v_max_i32_e32 v6, v8, v6
; GFX6-NEXT: v_sub_i32_e32 v8, vcc, 0, v7
-; GFX6-NEXT: v_max_i32_e32 v7, v7, v8
-; GFX6-NEXT: v_bfe_i32 v4, v4, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v7, 16, v7
-; GFX6-NEXT: v_bfe_i32 v5, v5, 0, 16
-; GFX6-NEXT: v_or_b32_e32 v6, v6, v7
-; GFX6-NEXT: v_sub_i32_e32 v7, vcc, 0, v4
-; GFX6-NEXT: v_max_i32_e32 v4, v4, v7
-; GFX6-NEXT: v_sub_i32_e32 v7, vcc, 0, v5
-; GFX6-NEXT: v_max_i32_e32 v5, v5, v7
+; GFX6-NEXT: v_max_i32_e32 v7, v8, v7
; GFX6-NEXT: v_bfe_i32 v2, v2, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v7
; GFX6-NEXT: v_bfe_i32 v3, v3, 0, 16
-; GFX6-NEXT: v_or_b32_e32 v4, v4, v5
-; GFX6-NEXT: v_sub_i32_e32 v5, vcc, 0, v2
-; GFX6-NEXT: v_max_i32_e32 v2, v2, v5
-; GFX6-NEXT: v_sub_i32_e32 v5, vcc, 0, v3
-; GFX6-NEXT: v_max_i32_e32 v3, v3, v5
-; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX6-NEXT: v_bfe_i32 v1, v1, 0, 16
-; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
-; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v0
-; GFX6-NEXT: v_max_i32_e32 v0, v0, v3
-; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
-; GFX6-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX6-NEXT: v_or_b32_e32 v6, v6, v8
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, 0, v2
+; GFX6-NEXT: v_max_i32_e32 v2, v8, v2
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, 0, v3
+; GFX6-NEXT: v_max_i32_e32 v3, v8, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v3
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX6-NEXT: v_or_b32_e32 v2, v2, v8
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX6-NEXT: v_alignbit_b32 v1, v2, v0, 16
-; GFX6-NEXT: v_alignbit_b32 v5, v6, v4, 16
-; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v2
-; GFX6-NEXT: v_lshrrev_b32_e32 v7, 16, v6
+; GFX6-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX6-NEXT: v_alignbit_b32 v1, v2, v1, 16
+; GFX6-NEXT: v_alignbit_b32 v5, v6, v5, 16
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: v_abs_v8i16:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX7-NEXT: v_sub_i32_e32 v8, vcc, 0, v0
+; GFX7-NEXT: v_bfe_i32 v4, v4, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v0, v8, v0
+; GFX7-NEXT: v_sub_i32_e32 v8, vcc, 0, v1
+; GFX7-NEXT: v_bfe_i32 v5, v5, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v1, v8, v1
+; GFX7-NEXT: v_sub_i32_e32 v8, vcc, 0, v4
; GFX7-NEXT: v_bfe_i32 v6, v6, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v4, v8, v4
+; GFX7-NEXT: v_sub_i32_e32 v8, vcc, 0, v5
; GFX7-NEXT: v_bfe_i32 v7, v7, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v5, v8, v5
; GFX7-NEXT: v_sub_i32_e32 v8, vcc, 0, v6
-; GFX7-NEXT: v_max_i32_e32 v6, v6, v8
+; GFX7-NEXT: v_max_i32_e32 v6, v8, v6
; GFX7-NEXT: v_sub_i32_e32 v8, vcc, 0, v7
-; GFX7-NEXT: v_max_i32_e32 v7, v7, v8
-; GFX7-NEXT: v_bfe_i32 v4, v4, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v7
-; GFX7-NEXT: v_bfe_i32 v5, v5, 0, 16
-; GFX7-NEXT: v_or_b32_e32 v6, v6, v7
-; GFX7-NEXT: v_sub_i32_e32 v7, vcc, 0, v4
-; GFX7-NEXT: v_max_i32_e32 v4, v4, v7
-; GFX7-NEXT: v_sub_i32_e32 v7, vcc, 0, v5
-; GFX7-NEXT: v_max_i32_e32 v5, v5, v7
+; GFX7-NEXT: v_max_i32_e32 v7, v8, v7
; GFX7-NEXT: v_bfe_i32 v2, v2, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v7
; GFX7-NEXT: v_bfe_i32 v3, v3, 0, 16
-; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
-; GFX7-NEXT: v_sub_i32_e32 v5, vcc, 0, v2
-; GFX7-NEXT: v_max_i32_e32 v2, v2, v5
-; GFX7-NEXT: v_sub_i32_e32 v5, vcc, 0, v3
-; GFX7-NEXT: v_max_i32_e32 v3, v3, v5
-; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
-; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
-; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v0
-; GFX7-NEXT: v_max_i32_e32 v0, v0, v3
-; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
-; GFX7-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX7-NEXT: v_or_b32_e32 v6, v6, v8
+; GFX7-NEXT: v_sub_i32_e32 v8, vcc, 0, v2
+; GFX7-NEXT: v_max_i32_e32 v2, v8, v2
+; GFX7-NEXT: v_sub_i32_e32 v8, vcc, 0, v3
+; GFX7-NEXT: v_max_i32_e32 v3, v8, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v3
; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v8
; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7-NEXT: v_alignbit_b32 v1, v2, v0, 16
-; GFX7-NEXT: v_alignbit_b32 v5, v6, v4, 16
-; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
-; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v6
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_alignbit_b32 v1, v2, v1, 16
+; GFX7-NEXT: v_alignbit_b32 v5, v6, v5, 16
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_abs_v8i16:
@@ -682,155 +674,147 @@ define <16 x i16> @v_abs_v16i16(<16 x i16> %arg) {
; GFX6-LABEL: v_abs_v16i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX6-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v0
+; GFX6-NEXT: v_bfe_i32 v4, v4, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v0, v16, v0
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v1
+; GFX6-NEXT: v_bfe_i32 v5, v5, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v1, v16, v1
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v4
+; GFX6-NEXT: v_bfe_i32 v8, v8, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v4, v16, v4
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v5
+; GFX6-NEXT: v_bfe_i32 v9, v9, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v5, v16, v5
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v8
+; GFX6-NEXT: v_bfe_i32 v12, v12, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v8, v16, v8
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v9
+; GFX6-NEXT: v_bfe_i32 v13, v13, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v9, v16, v9
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v12
; GFX6-NEXT: v_bfe_i32 v14, v14, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v12, v16, v12
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v13
; GFX6-NEXT: v_bfe_i32 v15, v15, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v13, v16, v13
; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v14
-; GFX6-NEXT: v_max_i32_e32 v14, v14, v16
+; GFX6-NEXT: v_max_i32_e32 v14, v16, v14
; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v15
-; GFX6-NEXT: v_max_i32_e32 v15, v15, v16
-; GFX6-NEXT: v_bfe_i32 v12, v12, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v15, 16, v15
-; GFX6-NEXT: v_bfe_i32 v13, v13, 0, 16
-; GFX6-NEXT: v_or_b32_e32 v14, v14, v15
-; GFX6-NEXT: v_sub_i32_e32 v15, vcc, 0, v12
-; GFX6-NEXT: v_max_i32_e32 v12, v12, v15
-; GFX6-NEXT: v_sub_i32_e32 v15, vcc, 0, v13
-; GFX6-NEXT: v_max_i32_e32 v13, v13, v15
+; GFX6-NEXT: v_max_i32_e32 v15, v16, v15
; GFX6-NEXT: v_bfe_i32 v10, v10, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; GFX6-NEXT: v_lshlrev_b32_e32 v16, 16, v15
; GFX6-NEXT: v_bfe_i32 v11, v11, 0, 16
-; GFX6-NEXT: v_or_b32_e32 v12, v12, v13
-; GFX6-NEXT: v_sub_i32_e32 v13, vcc, 0, v10
-; GFX6-NEXT: v_max_i32_e32 v10, v10, v13
-; GFX6-NEXT: v_sub_i32_e32 v13, vcc, 0, v11
-; GFX6-NEXT: v_max_i32_e32 v11, v11, v13
-; GFX6-NEXT: v_bfe_i32 v8, v8, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v11, 16, v11
-; GFX6-NEXT: v_bfe_i32 v9, v9, 0, 16
-; GFX6-NEXT: v_or_b32_e32 v10, v10, v11
-; GFX6-NEXT: v_sub_i32_e32 v11, vcc, 0, v8
-; GFX6-NEXT: v_max_i32_e32 v8, v8, v11
-; GFX6-NEXT: v_sub_i32_e32 v11, vcc, 0, v9
-; GFX6-NEXT: v_max_i32_e32 v9, v9, v11
+; GFX6-NEXT: v_or_b32_e32 v14, v14, v16
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v10
+; GFX6-NEXT: v_max_i32_e32 v10, v16, v10
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v11
+; GFX6-NEXT: v_max_i32_e32 v11, v16, v11
; GFX6-NEXT: v_bfe_i32 v6, v6, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX6-NEXT: v_lshlrev_b32_e32 v16, 16, v11
; GFX6-NEXT: v_bfe_i32 v7, v7, 0, 16
-; GFX6-NEXT: v_or_b32_e32 v8, v8, v9
-; GFX6-NEXT: v_sub_i32_e32 v9, vcc, 0, v6
-; GFX6-NEXT: v_max_i32_e32 v6, v6, v9
-; GFX6-NEXT: v_sub_i32_e32 v9, vcc, 0, v7
-; GFX6-NEXT: v_max_i32_e32 v7, v7, v9
-; GFX6-NEXT: v_bfe_i32 v4, v4, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v7, 16, v7
-; GFX6-NEXT: v_bfe_i32 v5, v5, 0, 16
-; GFX6-NEXT: v_or_b32_e32 v6, v6, v7
-; GFX6-NEXT: v_sub_i32_e32 v7, vcc, 0, v4
-; GFX6-NEXT: v_max_i32_e32 v4, v4, v7
-; GFX6-NEXT: v_sub_i32_e32 v7, vcc, 0, v5
-; GFX6-NEXT: v_max_i32_e32 v5, v5, v7
+; GFX6-NEXT: v_or_b32_e32 v10, v10, v16
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v6
+; GFX6-NEXT: v_max_i32_e32 v6, v16, v6
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v7
+; GFX6-NEXT: v_max_i32_e32 v7, v16, v7
; GFX6-NEXT: v_bfe_i32 v2, v2, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX6-NEXT: v_lshlrev_b32_e32 v16, 16, v7
; GFX6-NEXT: v_bfe_i32 v3, v3, 0, 16
-; GFX6-NEXT: v_or_b32_e32 v4, v4, v5
-; GFX6-NEXT: v_sub_i32_e32 v5, vcc, 0, v2
-; GFX6-NEXT: v_max_i32_e32 v2, v2, v5
-; GFX6-NEXT: v_sub_i32_e32 v5, vcc, 0, v3
-; GFX6-NEXT: v_max_i32_e32 v3, v3, v5
-; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX6-NEXT: v_bfe_i32 v1, v1, 0, 16
-; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
-; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v0
-; GFX6-NEXT: v_max_i32_e32 v0, v0, v3
-; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
-; GFX6-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX6-NEXT: v_or_b32_e32 v6, v6, v16
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v2
+; GFX6-NEXT: v_max_i32_e32 v2, v16, v2
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v3
+; GFX6-NEXT: v_max_i32_e32 v3, v16, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v16, 16, v3
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX6-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX6-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; GFX6-NEXT: v_or_b32_e32 v2, v2, v16
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX6-NEXT: v_alignbit_b32 v1, v2, v0, 16
-; GFX6-NEXT: v_alignbit_b32 v5, v6, v4, 16
-; GFX6-NEXT: v_alignbit_b32 v9, v10, v8, 16
-; GFX6-NEXT: v_alignbit_b32 v13, v14, v12, 16
-; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v2
-; GFX6-NEXT: v_lshrrev_b32_e32 v7, 16, v6
-; GFX6-NEXT: v_lshrrev_b32_e32 v11, 16, v10
-; GFX6-NEXT: v_lshrrev_b32_e32 v15, 16, v14
+; GFX6-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX6-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX6-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX6-NEXT: v_alignbit_b32 v1, v2, v1, 16
+; GFX6-NEXT: v_alignbit_b32 v5, v6, v5, 16
+; GFX6-NEXT: v_alignbit_b32 v9, v10, v9, 16
+; GFX6-NEXT: v_alignbit_b32 v13, v14, v13, 16
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: v_abs_v16i16:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v0
+; GFX7-NEXT: v_bfe_i32 v4, v4, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v0, v16, v0
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v1
+; GFX7-NEXT: v_bfe_i32 v5, v5, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v1, v16, v1
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v4
+; GFX7-NEXT: v_bfe_i32 v8, v8, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v4, v16, v4
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v5
+; GFX7-NEXT: v_bfe_i32 v9, v9, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v5, v16, v5
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v8
+; GFX7-NEXT: v_bfe_i32 v12, v12, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v8, v16, v8
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v9
+; GFX7-NEXT: v_bfe_i32 v13, v13, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v9, v16, v9
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v12
; GFX7-NEXT: v_bfe_i32 v14, v14, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v12, v16, v12
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v13
; GFX7-NEXT: v_bfe_i32 v15, v15, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v13, v16, v13
; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v14
-; GFX7-NEXT: v_max_i32_e32 v14, v14, v16
+; GFX7-NEXT: v_max_i32_e32 v14, v16, v14
; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v15
-; GFX7-NEXT: v_max_i32_e32 v15, v15, v16
-; GFX7-NEXT: v_bfe_i32 v12, v12, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v15, 16, v15
-; GFX7-NEXT: v_bfe_i32 v13, v13, 0, 16
-; GFX7-NEXT: v_or_b32_e32 v14, v14, v15
-; GFX7-NEXT: v_sub_i32_e32 v15, vcc, 0, v12
-; GFX7-NEXT: v_max_i32_e32 v12, v12, v15
-; GFX7-NEXT: v_sub_i32_e32 v15, vcc, 0, v13
-; GFX7-NEXT: v_max_i32_e32 v13, v13, v15
+; GFX7-NEXT: v_max_i32_e32 v15, v16, v15
; GFX7-NEXT: v_bfe_i32 v10, v10, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; GFX7-NEXT: v_lshlrev_b32_e32 v16, 16, v15
; GFX7-NEXT: v_bfe_i32 v11, v11, 0, 16
-; GFX7-NEXT: v_or_b32_e32 v12, v12, v13
-; GFX7-NEXT: v_sub_i32_e32 v13, vcc, 0, v10
-; GFX7-NEXT: v_max_i32_e32 v10, v10, v13
-; GFX7-NEXT: v_sub_i32_e32 v13, vcc, 0, v11
-; GFX7-NEXT: v_max_i32_e32 v11, v11, v13
-; GFX7-NEXT: v_bfe_i32 v8, v8, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v11, 16, v11
-; GFX7-NEXT: v_bfe_i32 v9, v9, 0, 16
-; GFX7-NEXT: v_or_b32_e32 v10, v10, v11
-; GFX7-NEXT: v_sub_i32_e32 v11, vcc, 0, v8
-; GFX7-NEXT: v_max_i32_e32 v8, v8, v11
-; GFX7-NEXT: v_sub_i32_e32 v11, vcc, 0, v9
-; GFX7-NEXT: v_max_i32_e32 v9, v9, v11
+; GFX7-NEXT: v_or_b32_e32 v14, v14, v16
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v10
+; GFX7-NEXT: v_max_i32_e32 v10, v16, v10
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v11
+; GFX7-NEXT: v_max_i32_e32 v11, v16, v11
; GFX7-NEXT: v_bfe_i32 v6, v6, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX7-NEXT: v_lshlrev_b32_e32 v16, 16, v11
; GFX7-NEXT: v_bfe_i32 v7, v7, 0, 16
-; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
-; GFX7-NEXT: v_sub_i32_e32 v9, vcc, 0, v6
-; GFX7-NEXT: v_max_i32_e32 v6, v6, v9
-; GFX7-NEXT: v_sub_i32_e32 v9, vcc, 0, v7
-; GFX7-NEXT: v_max_i32_e32 v7, v7, v9
-; GFX7-NEXT: v_bfe_i32 v4, v4, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v7
-; GFX7-NEXT: v_bfe_i32 v5, v5, 0, 16
-; GFX7-NEXT: v_or_b32_e32 v6, v6, v7
-; GFX7-NEXT: v_sub_i32_e32 v7, vcc, 0, v4
-; GFX7-NEXT: v_max_i32_e32 v4, v4, v7
-; GFX7-NEXT: v_sub_i32_e32 v7, vcc, 0, v5
-; GFX7-NEXT: v_max_i32_e32 v5, v5, v7
+; GFX7-NEXT: v_or_b32_e32 v10, v10, v16
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v6
+; GFX7-NEXT: v_max_i32_e32 v6, v16, v6
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v7
+; GFX7-NEXT: v_max_i32_e32 v7, v16, v7
; GFX7-NEXT: v_bfe_i32 v2, v2, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v16, 16, v7
; GFX7-NEXT: v_bfe_i32 v3, v3, 0, 16
-; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
-; GFX7-NEXT: v_sub_i32_e32 v5, vcc, 0, v2
-; GFX7-NEXT: v_max_i32_e32 v2, v2, v5
-; GFX7-NEXT: v_sub_i32_e32 v5, vcc, 0, v3
-; GFX7-NEXT: v_max_i32_e32 v3, v3, v5
-; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
-; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
-; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v0
-; GFX7-NEXT: v_max_i32_e32 v0, v0, v3
-; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
-; GFX7-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX7-NEXT: v_or_b32_e32 v6, v6, v16
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v2
+; GFX7-NEXT: v_max_i32_e32 v2, v16, v2
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v3
+; GFX7-NEXT: v_max_i32_e32 v3, v16, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v16, 16, v3
; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX7-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v16
; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7-NEXT: v_alignbit_b32 v1, v2, v0, 16
-; GFX7-NEXT: v_alignbit_b32 v5, v6, v4, 16
-; GFX7-NEXT: v_alignbit_b32 v9, v10, v8, 16
-; GFX7-NEXT: v_alignbit_b32 v13, v14, v12, 16
-; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
-; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v6
-; GFX7-NEXT: v_lshrrev_b32_e32 v11, 16, v10
-; GFX7-NEXT: v_lshrrev_b32_e32 v15, 16, v14
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX7-NEXT: v_alignbit_b32 v1, v2, v1, 16
+; GFX7-NEXT: v_alignbit_b32 v5, v6, v5, 16
+; GFX7-NEXT: v_alignbit_b32 v9, v10, v9, 16
+; GFX7-NEXT: v_alignbit_b32 v13, v14, v13, 16
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_abs_v16i16:
@@ -974,303 +958,287 @@ define <32 x i16> @v_abs_v32i16(<32 x i16> %arg) {
; GFX6-LABEL: v_abs_v32i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v0
+; GFX6-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v0, v31, v0
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v1
+; GFX6-NEXT: v_bfe_i32 v4, v4, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v1, v31, v1
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v4
+; GFX6-NEXT: v_bfe_i32 v5, v5, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v4, v31, v4
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v5
+; GFX6-NEXT: v_bfe_i32 v8, v8, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v5, v31, v5
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v8
+; GFX6-NEXT: v_bfe_i32 v9, v9, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v8, v31, v8
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v9
+; GFX6-NEXT: v_bfe_i32 v12, v12, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v9, v31, v9
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v12
+; GFX6-NEXT: v_bfe_i32 v13, v13, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v12, v31, v12
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v13
+; GFX6-NEXT: v_bfe_i32 v16, v16, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v13, v31, v13
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v16
+; GFX6-NEXT: v_bfe_i32 v17, v17, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v16, v31, v16
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v17
+; GFX6-NEXT: v_bfe_i32 v20, v20, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v17, v31, v17
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v20
+; GFX6-NEXT: v_bfe_i32 v21, v21, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v20, v31, v20
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v21
+; GFX6-NEXT: v_bfe_i32 v24, v24, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v21, v31, v21
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v24
+; GFX6-NEXT: v_bfe_i32 v25, v25, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v24, v31, v24
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v25
; GFX6-NEXT: v_bfe_i32 v28, v28, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v25, v31, v25
; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v28
; GFX6-NEXT: v_bfe_i32 v29, v29, 0, 16
-; GFX6-NEXT: v_max_i32_e32 v28, v28, v31
+; GFX6-NEXT: v_max_i32_e32 v28, v31, v28
; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v29
; GFX6-NEXT: v_bfe_i32 v30, v30, 0, 16
-; GFX6-NEXT: v_max_i32_e32 v29, v29, v31
+; GFX6-NEXT: v_max_i32_e32 v29, v31, v29
; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v30
+; GFX6-NEXT: v_max_i32_e32 v30, v31, v30
+; GFX6-NEXT: buffer_load_dword v31, off, s[0:3], s32
; GFX6-NEXT: v_bfe_i32 v26, v26, 0, 16
-; GFX6-NEXT: v_max_i32_e32 v30, v30, v31
-; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v26
; GFX6-NEXT: v_bfe_i32 v27, v27, 0, 16
-; GFX6-NEXT: v_max_i32_e32 v26, v26, v31
-; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v27
-; GFX6-NEXT: v_bfe_i32 v24, v24, 0, 16
-; GFX6-NEXT: v_max_i32_e32 v27, v27, v31
-; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v24
-; GFX6-NEXT: v_bfe_i32 v25, v25, 0, 16
-; GFX6-NEXT: v_max_i32_e32 v24, v24, v31
-; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v25
; GFX6-NEXT: v_bfe_i32 v22, v22, 0, 16
-; GFX6-NEXT: v_max_i32_e32 v25, v25, v31
-; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v22
; GFX6-NEXT: v_bfe_i32 v23, v23, 0, 16
-; GFX6-NEXT: v_max_i32_e32 v22, v22, v31
-; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v23
-; GFX6-NEXT: v_max_i32_e32 v23, v23, v31
-; GFX6-NEXT: buffer_load_dword v31, off, s[0:3], s32
-; GFX6-NEXT: v_lshlrev_b32_e32 v23, 16, v23
-; GFX6-NEXT: v_lshlrev_b32_e32 v25, 16, v25
-; GFX6-NEXT: v_or_b32_e32 v22, v22, v23
-; GFX6-NEXT: v_or_b32_e32 v24, v24, v25
-; GFX6-NEXT: v_bfe_i32 v21, v21, 0, 16
-; GFX6-NEXT: v_bfe_i32 v20, v20, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v29, 16, v29
-; GFX6-NEXT: v_or_b32_e32 v28, v28, v29
-; GFX6-NEXT: v_sub_i32_e32 v29, vcc, 0, v20
-; GFX6-NEXT: v_max_i32_e32 v20, v20, v29
; GFX6-NEXT: v_bfe_i32 v18, v18, 0, 16
; GFX6-NEXT: v_bfe_i32 v19, v19, 0, 16
-; GFX6-NEXT: v_bfe_i32 v16, v16, 0, 16
-; GFX6-NEXT: v_bfe_i32 v17, v17, 0, 16
; GFX6-NEXT: v_bfe_i32 v14, v14, 0, 16
; GFX6-NEXT: v_bfe_i32 v15, v15, 0, 16
-; GFX6-NEXT: v_bfe_i32 v12, v12, 0, 16
-; GFX6-NEXT: v_bfe_i32 v13, v13, 0, 16
; GFX6-NEXT: v_bfe_i32 v10, v10, 0, 16
; GFX6-NEXT: v_bfe_i32 v11, v11, 0, 16
-; GFX6-NEXT: v_bfe_i32 v8, v8, 0, 16
-; GFX6-NEXT: v_bfe_i32 v9, v9, 0, 16
; GFX6-NEXT: v_bfe_i32 v6, v6, 0, 16
; GFX6-NEXT: v_bfe_i32 v7, v7, 0, 16
-; GFX6-NEXT: v_bfe_i32 v4, v4, 0, 16
-; GFX6-NEXT: v_bfe_i32 v5, v5, 0, 16
; GFX6-NEXT: v_bfe_i32 v2, v2, 0, 16
; GFX6-NEXT: v_bfe_i32 v3, v3, 0, 16
-; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 16
-; GFX6-NEXT: v_bfe_i32 v1, v1, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v27, 16, v27
-; GFX6-NEXT: v_or_b32_e32 v26, v26, v27
-; GFX6-NEXT: v_lshrrev_b32_e32 v27, 16, v26
-; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_bfe_i32 v23, v31, 0, 16
-; GFX6-NEXT: v_sub_i32_e32 v25, vcc, 0, v23
-; GFX6-NEXT: v_max_i32_e32 v23, v23, v25
-; GFX6-NEXT: v_lshlrev_b32_e32 v23, 16, v23
-; GFX6-NEXT: v_or_b32_e32 v30, v30, v23
-; GFX6-NEXT: v_sub_i32_e32 v23, vcc, 0, v21
-; GFX6-NEXT: v_max_i32_e32 v21, v21, v23
-; GFX6-NEXT: v_lshlrev_b32_e32 v21, 16, v21
-; GFX6-NEXT: v_or_b32_e32 v20, v20, v21
-; GFX6-NEXT: v_sub_i32_e32 v21, vcc, 0, v18
-; GFX6-NEXT: v_max_i32_e32 v18, v18, v21
-; GFX6-NEXT: v_sub_i32_e32 v21, vcc, 0, v19
-; GFX6-NEXT: v_max_i32_e32 v19, v19, v21
-; GFX6-NEXT: v_lshlrev_b32_e32 v19, 16, v19
-; GFX6-NEXT: v_or_b32_e32 v18, v18, v19
-; GFX6-NEXT: v_sub_i32_e32 v19, vcc, 0, v16
-; GFX6-NEXT: v_max_i32_e32 v16, v16, v19
-; GFX6-NEXT: v_sub_i32_e32 v19, vcc, 0, v17
-; GFX6-NEXT: v_max_i32_e32 v17, v17, v19
-; GFX6-NEXT: v_lshlrev_b32_e32 v17, 16, v17
-; GFX6-NEXT: v_or_b32_e32 v16, v16, v17
-; GFX6-NEXT: v_sub_i32_e32 v17, vcc, 0, v14
-; GFX6-NEXT: v_max_i32_e32 v14, v14, v17
-; GFX6-NEXT: v_sub_i32_e32 v17, vcc, 0, v15
-; GFX6-NEXT: v_max_i32_e32 v15, v15, v17
-; GFX6-NEXT: v_lshlrev_b32_e32 v15, 16, v15
-; GFX6-NEXT: v_or_b32_e32 v14, v14, v15
-; GFX6-NEXT: v_sub_i32_e32 v15, vcc, 0, v12
-; GFX6-NEXT: v_max_i32_e32 v12, v12, v15
-; GFX6-NEXT: v_sub_i32_e32 v15, vcc, 0, v13
-; GFX6-NEXT: v_max_i32_e32 v13, v13, v15
-; GFX6-NEXT: v_lshlrev_b32_e32 v13, 16, v13
-; GFX6-NEXT: v_or_b32_e32 v12, v12, v13
-; GFX6-NEXT: v_sub_i32_e32 v13, vcc, 0, v10
-; GFX6-NEXT: v_max_i32_e32 v10, v10, v13
-; GFX6-NEXT: v_sub_i32_e32 v13, vcc, 0, v11
-; GFX6-NEXT: v_max_i32_e32 v11, v11, v13
-; GFX6-NEXT: v_lshlrev_b32_e32 v11, 16, v11
-; GFX6-NEXT: v_or_b32_e32 v10, v10, v11
-; GFX6-NEXT: v_sub_i32_e32 v11, vcc, 0, v8
-; GFX6-NEXT: v_max_i32_e32 v8, v8, v11
-; GFX6-NEXT: v_sub_i32_e32 v11, vcc, 0, v9
-; GFX6-NEXT: v_max_i32_e32 v9, v9, v11
-; GFX6-NEXT: v_lshlrev_b32_e32 v9, 16, v9
-; GFX6-NEXT: v_or_b32_e32 v8, v8, v9
-; GFX6-NEXT: v_sub_i32_e32 v9, vcc, 0, v6
-; GFX6-NEXT: v_max_i32_e32 v6, v6, v9
-; GFX6-NEXT: v_sub_i32_e32 v9, vcc, 0, v7
-; GFX6-NEXT: v_max_i32_e32 v7, v7, v9
-; GFX6-NEXT: v_lshlrev_b32_e32 v7, 16, v7
-; GFX6-NEXT: v_or_b32_e32 v6, v6, v7
-; GFX6-NEXT: v_sub_i32_e32 v7, vcc, 0, v4
-; GFX6-NEXT: v_max_i32_e32 v4, v4, v7
-; GFX6-NEXT: v_sub_i32_e32 v7, vcc, 0, v5
-; GFX6-NEXT: v_max_i32_e32 v5, v5, v7
-; GFX6-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX6-NEXT: v_or_b32_e32 v4, v4, v5
-; GFX6-NEXT: v_sub_i32_e32 v5, vcc, 0, v2
-; GFX6-NEXT: v_max_i32_e32 v2, v2, v5
-; GFX6-NEXT: v_sub_i32_e32 v5, vcc, 0, v3
-; GFX6-NEXT: v_max_i32_e32 v3, v3, v5
-; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
-; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v0
-; GFX6-NEXT: v_max_i32_e32 v0, v0, v3
-; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
-; GFX6-NEXT: v_max_i32_e32 v1, v1, v3
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX6-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX6-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; GFX6-NEXT: v_lshlrev_b32_e32 v17, 16, v17
+; GFX6-NEXT: v_lshlrev_b32_e32 v21, 16, v21
+; GFX6-NEXT: v_lshlrev_b32_e32 v25, 16, v25
+; GFX6-NEXT: v_lshlrev_b32_e32 v29, 16, v29
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX6-NEXT: v_alignbit_b32 v1, v2, v0, 16
-; GFX6-NEXT: v_alignbit_b32 v5, v6, v4, 16
-; GFX6-NEXT: v_alignbit_b32 v9, v10, v8, 16
-; GFX6-NEXT: v_alignbit_b32 v13, v14, v12, 16
-; GFX6-NEXT: v_alignbit_b32 v17, v18, v16, 16
-; GFX6-NEXT: v_alignbit_b32 v21, v22, v20, 16
-; GFX6-NEXT: v_alignbit_b32 v25, v26, v24, 16
-; GFX6-NEXT: v_alignbit_b32 v29, v30, v28, 16
-; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v2
-; GFX6-NEXT: v_lshrrev_b32_e32 v7, 16, v6
-; GFX6-NEXT: v_lshrrev_b32_e32 v11, 16, v10
-; GFX6-NEXT: v_lshrrev_b32_e32 v15, 16, v14
-; GFX6-NEXT: v_lshrrev_b32_e32 v19, 16, v18
-; GFX6-NEXT: v_lshrrev_b32_e32 v23, 16, v22
-; GFX6-NEXT: v_lshrrev_b32_e32 v31, 16, v30
+; GFX6-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX6-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX6-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX6-NEXT: v_or_b32_e32 v16, v16, v17
+; GFX6-NEXT: v_or_b32_e32 v20, v20, v21
+; GFX6-NEXT: v_or_b32_e32 v24, v24, v25
+; GFX6-NEXT: v_or_b32_e32 v28, v28, v29
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: v_bfe_i32 v31, v31, 0, 16
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v31
+; GFX6-NEXT: v_max_i32_e32 v31, v32, v31
+; GFX6-NEXT: v_lshlrev_b32_e32 v32, 16, v31
+; GFX6-NEXT: v_or_b32_e32 v30, v30, v32
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v26
+; GFX6-NEXT: v_max_i32_e32 v26, v32, v26
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v27
+; GFX6-NEXT: v_max_i32_e32 v27, v32, v27
+; GFX6-NEXT: v_lshlrev_b32_e32 v32, 16, v27
+; GFX6-NEXT: v_or_b32_e32 v26, v26, v32
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v22
+; GFX6-NEXT: v_max_i32_e32 v22, v32, v22
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v23
+; GFX6-NEXT: v_max_i32_e32 v23, v32, v23
+; GFX6-NEXT: v_lshlrev_b32_e32 v32, 16, v23
+; GFX6-NEXT: v_or_b32_e32 v22, v22, v32
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v18
+; GFX6-NEXT: v_max_i32_e32 v18, v32, v18
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v19
+; GFX6-NEXT: v_max_i32_e32 v19, v32, v19
+; GFX6-NEXT: v_lshlrev_b32_e32 v32, 16, v19
+; GFX6-NEXT: v_or_b32_e32 v18, v18, v32
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v14
+; GFX6-NEXT: v_max_i32_e32 v14, v32, v14
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v15
+; GFX6-NEXT: v_max_i32_e32 v15, v32, v15
+; GFX6-NEXT: v_lshlrev_b32_e32 v32, 16, v15
+; GFX6-NEXT: v_or_b32_e32 v14, v14, v32
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v10
+; GFX6-NEXT: v_max_i32_e32 v10, v32, v10
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v11
+; GFX6-NEXT: v_max_i32_e32 v11, v32, v11
+; GFX6-NEXT: v_lshlrev_b32_e32 v32, 16, v11
+; GFX6-NEXT: v_or_b32_e32 v10, v10, v32
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v6
+; GFX6-NEXT: v_max_i32_e32 v6, v32, v6
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v7
+; GFX6-NEXT: v_max_i32_e32 v7, v32, v7
+; GFX6-NEXT: v_lshlrev_b32_e32 v32, 16, v7
+; GFX6-NEXT: v_or_b32_e32 v6, v6, v32
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v2
+; GFX6-NEXT: v_max_i32_e32 v2, v32, v2
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v3
+; GFX6-NEXT: v_max_i32_e32 v3, v32, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v32, 16, v3
+; GFX6-NEXT: v_or_b32_e32 v2, v2, v32
+; GFX6-NEXT: v_alignbit_b32 v1, v2, v1, 16
+; GFX6-NEXT: v_alignbit_b32 v5, v6, v5, 16
+; GFX6-NEXT: v_alignbit_b32 v9, v10, v9, 16
+; GFX6-NEXT: v_alignbit_b32 v13, v14, v13, 16
+; GFX6-NEXT: v_alignbit_b32 v17, v18, v17, 16
+; GFX6-NEXT: v_alignbit_b32 v21, v22, v21, 16
+; GFX6-NEXT: v_alignbit_b32 v25, v26, v25, 16
+; GFX6-NEXT: v_alignbit_b32 v29, v30, v29, 16
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: v_abs_v32i16:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v0
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v0, v31, v0
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v1
+; GFX7-NEXT: v_bfe_i32 v4, v4, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v1, v31, v1
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v4
+; GFX7-NEXT: v_bfe_i32 v5, v5, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v4, v31, v4
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v5
+; GFX7-NEXT: v_bfe_i32 v8, v8, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v5, v31, v5
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v8
+; GFX7-NEXT: v_bfe_i32 v9, v9, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v8, v31, v8
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v9
+; GFX7-NEXT: v_bfe_i32 v12, v12, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v9, v31, v9
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v12
+; GFX7-NEXT: v_bfe_i32 v13, v13, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v12, v31, v12
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v13
+; GFX7-NEXT: v_bfe_i32 v16, v16, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v13, v31, v13
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v16
+; GFX7-NEXT: v_bfe_i32 v17, v17, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v16, v31, v16
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v17
+; GFX7-NEXT: v_bfe_i32 v20, v20, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v17, v31, v17
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v20
+; GFX7-NEXT: v_bfe_i32 v21, v21, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v20, v31, v20
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v21
+; GFX7-NEXT: v_bfe_i32 v24, v24, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v21, v31, v21
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v24
+; GFX7-NEXT: v_bfe_i32 v25, v25, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v24, v31, v24
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v25
; GFX7-NEXT: v_bfe_i32 v28, v28, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v25, v31, v25
; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v28
; GFX7-NEXT: v_bfe_i32 v29, v29, 0, 16
-; GFX7-NEXT: v_max_i32_e32 v28, v28, v31
+; GFX7-NEXT: v_max_i32_e32 v28, v31, v28
; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v29
; GFX7-NEXT: v_bfe_i32 v30, v30, 0, 16
-; GFX7-NEXT: v_max_i32_e32 v29, v29, v31
+; GFX7-NEXT: v_max_i32_e32 v29, v31, v29
; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v30
+; GFX7-NEXT: v_max_i32_e32 v30, v31, v30
+; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32
; GFX7-NEXT: v_bfe_i32 v26, v26, 0, 16
-; GFX7-NEXT: v_max_i32_e32 v30, v30, v31
-; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v26
; GFX7-NEXT: v_bfe_i32 v27, v27, 0, 16
-; GFX7-NEXT: v_max_i32_e32 v26, v26, v31
-; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v27
-; GFX7-NEXT: v_bfe_i32 v24, v24, 0, 16
-; GFX7-NEXT: v_max_i32_e32 v27, v27, v31
-; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v24
-; GFX7-NEXT: v_bfe_i32 v25, v25, 0, 16
-; GFX7-NEXT: v_max_i32_e32 v24, v24, v31
-; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v25
; GFX7-NEXT: v_bfe_i32 v22, v22, 0, 16
-; GFX7-NEXT: v_max_i32_e32 v25, v25, v31
-; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v22
; GFX7-NEXT: v_bfe_i32 v23, v23, 0, 16
-; GFX7-NEXT: v_max_i32_e32 v22, v22, v31
-; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v23
-; GFX7-NEXT: v_max_i32_e32 v23, v23, v31
-; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32
-; GFX7-NEXT: v_lshlrev_b32_e32 v23, 16, v23
-; GFX7-NEXT: v_lshlrev_b32_e32 v25, 16, v25
-; GFX7-NEXT: v_or_b32_e32 v22, v22, v23
-; GFX7-NEXT: v_or_b32_e32 v24, v24, v25
-; GFX7-NEXT: v_bfe_i32 v21, v21, 0, 16
-; GFX7-NEXT: v_bfe_i32 v20, v20, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v29, 16, v29
-; GFX7-NEXT: v_or_b32_e32 v28, v28, v29
-; GFX7-NEXT: v_sub_i32_e32 v29, vcc, 0, v20
-; GFX7-NEXT: v_max_i32_e32 v20, v20, v29
; GFX7-NEXT: v_bfe_i32 v18, v18, 0, 16
; GFX7-NEXT: v_bfe_i32 v19, v19, 0, 16
-; GFX7-NEXT: v_bfe_i32 v16, v16, 0, 16
-; GFX7-NEXT: v_bfe_i32 v17, v17, 0, 16
; GFX7-NEXT: v_bfe_i32 v14, v14, 0, 16
; GFX7-NEXT: v_bfe_i32 v15, v15, 0, 16
-; GFX7-NEXT: v_bfe_i32 v12, v12, 0, 16
-; GFX7-NEXT: v_bfe_i32 v13, v13, 0, 16
; GFX7-NEXT: v_bfe_i32 v10, v10, 0, 16
; GFX7-NEXT: v_bfe_i32 v11, v11, 0, 16
-; GFX7-NEXT: v_bfe_i32 v8, v8, 0, 16
-; GFX7-NEXT: v_bfe_i32 v9, v9, 0, 16
; GFX7-NEXT: v_bfe_i32 v6, v6, 0, 16
; GFX7-NEXT: v_bfe_i32 v7, v7, 0, 16
-; GFX7-NEXT: v_bfe_i32 v4, v4, 0, 16
-; GFX7-NEXT: v_bfe_i32 v5, v5, 0, 16
; GFX7-NEXT: v_bfe_i32 v2, v2, 0, 16
; GFX7-NEXT: v_bfe_i32 v3, v3, 0, 16
-; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
-; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v27, 16, v27
-; GFX7-NEXT: v_or_b32_e32 v26, v26, v27
-; GFX7-NEXT: v_lshrrev_b32_e32 v27, 16, v26
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_bfe_i32 v23, v31, 0, 16
-; GFX7-NEXT: v_sub_i32_e32 v25, vcc, 0, v23
-; GFX7-NEXT: v_max_i32_e32 v23, v23, v25
-; GFX7-NEXT: v_lshlrev_b32_e32 v23, 16, v23
-; GFX7-NEXT: v_or_b32_e32 v30, v30, v23
-; GFX7-NEXT: v_sub_i32_e32 v23, vcc, 0, v21
-; GFX7-NEXT: v_max_i32_e32 v21, v21, v23
-; GFX7-NEXT: v_lshlrev_b32_e32 v21, 16, v21
-; GFX7-NEXT: v_or_b32_e32 v20, v20, v21
-; GFX7-NEXT: v_sub_i32_e32 v21, vcc, 0, v18
-; GFX7-NEXT: v_max_i32_e32 v18, v18, v21
-; GFX7-NEXT: v_sub_i32_e32 v21, vcc, 0, v19
-; GFX7-NEXT: v_max_i32_e32 v19, v19, v21
-; GFX7-NEXT: v_lshlrev_b32_e32 v19, 16, v19
-; GFX7-NEXT: v_or_b32_e32 v18, v18, v19
-; GFX7-NEXT: v_sub_i32_e32 v19, vcc, 0, v16
-; GFX7-NEXT: v_max_i32_e32 v16, v16, v19
-; GFX7-NEXT: v_sub_i32_e32 v19, vcc, 0, v17
-; GFX7-NEXT: v_max_i32_e32 v17, v17, v19
-; GFX7-NEXT: v_lshlrev_b32_e32 v17, 16, v17
-; GFX7-NEXT: v_or_b32_e32 v16, v16, v17
-; GFX7-NEXT: v_sub_i32_e32 v17, vcc, 0, v14
-; GFX7-NEXT: v_max_i32_e32 v14, v14, v17
-; GFX7-NEXT: v_sub_i32_e32 v17, vcc, 0, v15
-; GFX7-NEXT: v_max_i32_e32 v15, v15, v17
-; GFX7-NEXT: v_lshlrev_b32_e32 v15, 16, v15
-; GFX7-NEXT: v_or_b32_e32 v14, v14, v15
-; GFX7-NEXT: v_sub_i32_e32 v15, vcc, 0, v12
-; GFX7-NEXT: v_max_i32_e32 v12, v12, v15
-; GFX7-NEXT: v_sub_i32_e32 v15, vcc, 0, v13
-; GFX7-NEXT: v_max_i32_e32 v13, v13, v15
-; GFX7-NEXT: v_lshlrev_b32_e32 v13, 16, v13
-; GFX7-NEXT: v_or_b32_e32 v12, v12, v13
-; GFX7-NEXT: v_sub_i32_e32 v13, vcc, 0, v10
-; GFX7-NEXT: v_max_i32_e32 v10, v10, v13
-; GFX7-NEXT: v_sub_i32_e32 v13, vcc, 0, v11
-; GFX7-NEXT: v_max_i32_e32 v11, v11, v13
-; GFX7-NEXT: v_lshlrev_b32_e32 v11, 16, v11
-; GFX7-NEXT: v_or_b32_e32 v10, v10, v11
-; GFX7-NEXT: v_sub_i32_e32 v11, vcc, 0, v8
-; GFX7-NEXT: v_max_i32_e32 v8, v8, v11
-; GFX7-NEXT: v_sub_i32_e32 v11, vcc, 0, v9
-; GFX7-NEXT: v_max_i32_e32 v9, v9, v11
-; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9
-; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
-; GFX7-NEXT: v_sub_i32_e32 v9, vcc, 0, v6
-; GFX7-NEXT: v_max_i32_e32 v6, v6, v9
-; GFX7-NEXT: v_sub_i32_e32 v9, vcc, 0, v7
-; GFX7-NEXT: v_max_i32_e32 v7, v7, v9
-; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v7
-; GFX7-NEXT: v_or_b32_e32 v6, v6, v7
-; GFX7-NEXT: v_sub_i32_e32 v7, vcc, 0, v4
-; GFX7-NEXT: v_max_i32_e32 v4, v4, v7
-; GFX7-NEXT: v_sub_i32_e32 v7, vcc, 0, v5
-; GFX7-NEXT: v_max_i32_e32 v5, v5, v7
-; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
-; GFX7-NEXT: v_sub_i32_e32 v5, vcc, 0, v2
-; GFX7-NEXT: v_max_i32_e32 v2, v2, v5
-; GFX7-NEXT: v_sub_i32_e32 v5, vcc, 0, v3
-; GFX7-NEXT: v_max_i32_e32 v3, v3, v5
-; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
-; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v0
-; GFX7-NEXT: v_max_i32_e32 v0, v0, v3
-; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
-; GFX7-NEXT: v_max_i32_e32 v1, v1, v3
; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX7-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; GFX7-NEXT: v_lshlrev_b32_e32 v17, 16, v17
+; GFX7-NEXT: v_lshlrev_b32_e32 v21, 16, v21
+; GFX7-NEXT: v_lshlrev_b32_e32 v25, 16, v25
+; GFX7-NEXT: v_lshlrev_b32_e32 v29, 16, v29
; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7-NEXT: v_alignbit_b32 v1, v2, v0, 16
-; GFX7-NEXT: v_alignbit_b32 v5, v6, v4, 16
-; GFX7-NEXT: v_alignbit_b32 v9, v10, v8, 16
-; GFX7-NEXT: v_alignbit_b32 v13, v14, v12, 16
-; GFX7-NEXT: v_alignbit_b32 v17, v18, v16, 16
-; GFX7-NEXT: v_alignbit_b32 v21, v22, v20, 16
-; GFX7-NEXT: v_alignbit_b32 v25, v26, v24, 16
-; GFX7-NEXT: v_alignbit_b32 v29, v30, v28, 16
-; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
-; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v6
-; GFX7-NEXT: v_lshrrev_b32_e32 v11, 16, v10
-; GFX7-NEXT: v_lshrrev_b32_e32 v15, 16, v14
-; GFX7-NEXT: v_lshrrev_b32_e32 v19, 16, v18
-; GFX7-NEXT: v_lshrrev_b32_e32 v23, 16, v22
-; GFX7-NEXT: v_lshrrev_b32_e32 v31, 16, v30
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX7-NEXT: v_or_b32_e32 v16, v16, v17
+; GFX7-NEXT: v_or_b32_e32 v20, v20, v21
+; GFX7-NEXT: v_or_b32_e32 v24, v24, v25
+; GFX7-NEXT: v_or_b32_e32 v28, v28, v29
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_bfe_i32 v31, v31, 0, 16
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v31
+; GFX7-NEXT: v_max_i32_e32 v31, v32, v31
+; GFX7-NEXT: v_lshlrev_b32_e32 v32, 16, v31
+; GFX7-NEXT: v_or_b32_e32 v30, v30, v32
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v26
+; GFX7-NEXT: v_max_i32_e32 v26, v32, v26
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v27
+; GFX7-NEXT: v_max_i32_e32 v27, v32, v27
+; GFX7-NEXT: v_lshlrev_b32_e32 v32, 16, v27
+; GFX7-NEXT: v_or_b32_e32 v26, v26, v32
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v22
+; GFX7-NEXT: v_max_i32_e32 v22, v32, v22
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v23
+; GFX7-NEXT: v_max_i32_e32 v23, v32, v23
+; GFX7-NEXT: v_lshlrev_b32_e32 v32, 16, v23
+; GFX7-NEXT: v_or_b32_e32 v22, v22, v32
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v18
+; GFX7-NEXT: v_max_i32_e32 v18, v32, v18
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v19
+; GFX7-NEXT: v_max_i32_e32 v19, v32, v19
+; GFX7-NEXT: v_lshlrev_b32_e32 v32, 16, v19
+; GFX7-NEXT: v_or_b32_e32 v18, v18, v32
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v14
+; GFX7-NEXT: v_max_i32_e32 v14, v32, v14
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v15
+; GFX7-NEXT: v_max_i32_e32 v15, v32, v15
+; GFX7-NEXT: v_lshlrev_b32_e32 v32, 16, v15
+; GFX7-NEXT: v_or_b32_e32 v14, v14, v32
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v10
+; GFX7-NEXT: v_max_i32_e32 v10, v32, v10
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v11
+; GFX7-NEXT: v_max_i32_e32 v11, v32, v11
+; GFX7-NEXT: v_lshlrev_b32_e32 v32, 16, v11
+; GFX7-NEXT: v_or_b32_e32 v10, v10, v32
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v6
+; GFX7-NEXT: v_max_i32_e32 v6, v32, v6
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v7
+; GFX7-NEXT: v_max_i32_e32 v7, v32, v7
+; GFX7-NEXT: v_lshlrev_b32_e32 v32, 16, v7
+; GFX7-NEXT: v_or_b32_e32 v6, v6, v32
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v2
+; GFX7-NEXT: v_max_i32_e32 v2, v32, v2
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v3
+; GFX7-NEXT: v_max_i32_e32 v3, v32, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v32, 16, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v32
+; GFX7-NEXT: v_alignbit_b32 v1, v2, v1, 16
+; GFX7-NEXT: v_alignbit_b32 v5, v6, v5, 16
+; GFX7-NEXT: v_alignbit_b32 v9, v10, v9, 16
+; GFX7-NEXT: v_alignbit_b32 v13, v14, v13, 16
+; GFX7-NEXT: v_alignbit_b32 v17, v18, v17, 16
+; GFX7-NEXT: v_alignbit_b32 v21, v22, v21, 16
+; GFX7-NEXT: v_alignbit_b32 v25, v26, v25, 16
+; GFX7-NEXT: v_alignbit_b32 v29, v30, v29, 16
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_abs_v32i16:
diff --git a/llvm/test/CodeGen/AMDGPU/add.v2i16.ll b/llvm/test/CodeGen/AMDGPU/add.v2i16.ll
index d25bfbb..12309f3 100644
--- a/llvm/test/CodeGen/AMDGPU/add.v2i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/add.v2i16.ll
@@ -780,7 +780,7 @@ define amdgpu_kernel void @v_test_add_v2i16_zext_to_v2i64(ptr addrspace(1) %out,
; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-TRUE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
@@ -790,11 +790,9 @@ define amdgpu_kernel void @v_test_add_v2i16_zext_to_v2i64(ptr addrspace(1) %out,
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v1, v0
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.l
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v2, 16, v3
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.h
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v1
; GFX11-TRUE16-NEXT: global_store_b128 v1, v[0:3], s[0:1]
; GFX11-TRUE16-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll
index 117af95..74552a5 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll
@@ -29177,870 +29177,1844 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: s_branch .LBB19_2
;
-; GFX11-LABEL: bitcast_v64bf16_to_v32i32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:288
-; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:284
-; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:280
-; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:276
-; GFX11-NEXT: scratch_store_b32 off, v44, s32 offset:272
-; GFX11-NEXT: scratch_store_b32 off, v45, s32 offset:268
-; GFX11-NEXT: scratch_store_b32 off, v46, s32 offset:264
-; GFX11-NEXT: scratch_store_b32 off, v47, s32 offset:260
-; GFX11-NEXT: scratch_store_b32 off, v56, s32 offset:256
-; GFX11-NEXT: scratch_store_b32 off, v57, s32 offset:252
-; GFX11-NEXT: scratch_store_b32 off, v58, s32 offset:248
-; GFX11-NEXT: scratch_store_b32 off, v59, s32 offset:244
-; GFX11-NEXT: scratch_store_b32 off, v60, s32 offset:240
-; GFX11-NEXT: scratch_store_b32 off, v61, s32 offset:236
-; GFX11-NEXT: scratch_store_b32 off, v62, s32 offset:232
-; GFX11-NEXT: scratch_store_b32 off, v63, s32 offset:228
-; GFX11-NEXT: scratch_store_b32 off, v72, s32 offset:224
-; GFX11-NEXT: scratch_store_b32 off, v73, s32 offset:220
-; GFX11-NEXT: scratch_store_b32 off, v74, s32 offset:216
-; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:212
-; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:208
-; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:204
-; GFX11-NEXT: scratch_store_b32 off, v78, s32 offset:200
-; GFX11-NEXT: scratch_store_b32 off, v79, s32 offset:196
-; GFX11-NEXT: scratch_store_b32 off, v88, s32 offset:192
-; GFX11-NEXT: scratch_store_b32 off, v89, s32 offset:188
-; GFX11-NEXT: scratch_store_b32 off, v90, s32 offset:184
-; GFX11-NEXT: scratch_store_b32 off, v91, s32 offset:180
-; GFX11-NEXT: scratch_store_b32 off, v92, s32 offset:176
-; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:172
-; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:168
-; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:164
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:160
-; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:156
-; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:152
-; GFX11-NEXT: scratch_store_b32 off, v107, s32 offset:148
-; GFX11-NEXT: scratch_store_b32 off, v108, s32 offset:144
-; GFX11-NEXT: scratch_store_b32 off, v109, s32 offset:140
-; GFX11-NEXT: scratch_store_b32 off, v110, s32 offset:136
-; GFX11-NEXT: scratch_store_b32 off, v111, s32 offset:132
-; GFX11-NEXT: scratch_store_b32 off, v120, s32 offset:128
-; GFX11-NEXT: scratch_store_b32 off, v121, s32 offset:124
-; GFX11-NEXT: scratch_store_b32 off, v122, s32 offset:120
-; GFX11-NEXT: scratch_store_b32 off, v123, s32 offset:116
-; GFX11-NEXT: scratch_store_b32 off, v124, s32 offset:112
-; GFX11-NEXT: scratch_store_b32 off, v125, s32 offset:108
-; GFX11-NEXT: scratch_store_b32 off, v126, s32 offset:104
-; GFX11-NEXT: scratch_store_b32 off, v127, s32 offset:100
-; GFX11-NEXT: scratch_store_b32 off, v136, s32 offset:96
-; GFX11-NEXT: scratch_store_b32 off, v137, s32 offset:92
-; GFX11-NEXT: scratch_store_b32 off, v138, s32 offset:88
-; GFX11-NEXT: scratch_store_b32 off, v139, s32 offset:84
-; GFX11-NEXT: scratch_store_b32 off, v140, s32 offset:80
-; GFX11-NEXT: scratch_store_b32 off, v141, s32 offset:76
-; GFX11-NEXT: scratch_store_b32 off, v142, s32 offset:72
-; GFX11-NEXT: scratch_store_b32 off, v143, s32 offset:68
-; GFX11-NEXT: scratch_store_b32 off, v152, s32 offset:64
-; GFX11-NEXT: scratch_store_b32 off, v153, s32 offset:60
-; GFX11-NEXT: scratch_store_b32 off, v154, s32 offset:56
-; GFX11-NEXT: scratch_store_b32 off, v155, s32 offset:52
-; GFX11-NEXT: scratch_store_b32 off, v156, s32 offset:48
-; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:44
-; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:40
-; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:36
-; GFX11-NEXT: s_clause 0x8
-; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:32
-; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:28
-; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:24
-; GFX11-NEXT: scratch_store_b32 off, v171, s32 offset:20
-; GFX11-NEXT: scratch_store_b32 off, v172, s32 offset:16
-; GFX11-NEXT: scratch_store_b32 off, v173, s32 offset:12
-; GFX11-NEXT: scratch_store_b32 off, v174, s32 offset:8
-; GFX11-NEXT: scratch_store_b32 off, v175, s32 offset:4
-; GFX11-NEXT: scratch_store_b32 off, v184, s32
-; GFX11-NEXT: v_dual_mov_b32 v178, v13 :: v_dual_mov_b32 v179, v12
-; GFX11-NEXT: v_dual_mov_b32 v180, v11 :: v_dual_mov_b32 v181, v9
-; GFX11-NEXT: v_dual_mov_b32 v182, v10 :: v_dual_mov_b32 v169, v7
-; GFX11-NEXT: v_dual_mov_b32 v170, v8 :: v_dual_mov_b32 v177, v3
-; GFX11-NEXT: v_dual_mov_b32 v176, v6 :: v_dual_mov_b32 v171, v4
-; GFX11-NEXT: v_dual_mov_b32 v174, v5 :: v_dual_mov_b32 v173, v0
-; GFX11-NEXT: v_dual_mov_b32 v184, v2 :: v_dual_mov_b32 v175, v1
-; GFX11-NEXT: v_dual_mov_b32 v183, s28 :: v_dual_mov_b32 v172, s29
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_dual_mov_b32 v32, s0 :: v_dual_mov_b32 v37, s2
-; GFX11-NEXT: v_dual_mov_b32 v34, s1 :: v_dual_mov_b32 v41, s3
-; GFX11-NEXT: v_dual_mov_b32 v46, s16 :: v_dual_mov_b32 v59, s18
-; GFX11-NEXT: v_dual_mov_b32 v52, s17 :: v_dual_mov_b32 v67, s19
-; GFX11-NEXT: v_dual_mov_b32 v76, s20 :: v_dual_mov_b32 v97, s22
-; GFX11-NEXT: v_dual_mov_b32 v86, s21 :: v_dual_mov_b32 v109, s23
-; GFX11-NEXT: v_dual_mov_b32 v122, s24 :: v_dual_mov_b32 v151, s26
-; GFX11-NEXT: v_dual_mov_b32 v136, s25 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB19_3
-; GFX11-NEXT: .LBB19_2: ; %cmp.true
-; GFX11-NEXT: s_and_b32 s5, s27, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s4, s27, 16
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s6, s26, 16
-; GFX11-NEXT: s_and_b32 s4, s26, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v1
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_bfe_u32 v9, v3, 16, 1
-; GFX11-NEXT: s_lshl_b32 s7, s25, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_and_b32 s5, s25, 0xffff0000
-; GFX11-NEXT: s_and_b32 s4, s24, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s5
-; GFX11-NEXT: v_and_b32_e32 v51, 0xffff0000, v183
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v9, v3
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_bfe_u32 v10, v6, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v7, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s7
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_bfe_u32 v3, v8, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v3, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v10, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v7, v9, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: s_lshl_b32 s4, s24, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v6
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
-; GFX11-NEXT: s_and_b32 s4, s23, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_dual_cndmask_b32 v6, v1, v7 :: v_dual_and_b32 v1, 0xffff, v2
-; GFX11-NEXT: v_bfe_u32 v7, v9, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v7, v9
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s23, 16
-; GFX11-NEXT: v_lshl_or_b32 v151, v0, 16, v1
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_bfe_u32 v11, v7, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v4, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_and_b32 s4, s22, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v9, v12, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v11, v7
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v6, v10, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s22, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v5
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s4
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v9, v12
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v7
-; GFX11-NEXT: v_bfe_u32 v14, v10, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
-; GFX11-NEXT: s_and_b32 s4, s21, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v6, v9, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v9, v11, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v14, v10
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v7
-; GFX11-NEXT: v_dual_cndmask_b32 v8, v8, v13 :: v_dual_add_nc_u32 v7, v9, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v12
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v10
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_lshl_b32 s4, s21, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v11
-; GFX11-NEXT: v_add_f32_e64 v16, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v9, v12, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v10, v13, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX11-NEXT: v_bfe_u32 v12, v16, 16, 1
-; GFX11-NEXT: s_and_b32 s4, s20, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v11, v7, v14 :: v_dual_add_nc_u32 v10, v10, v13
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v9
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v12, v16
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: s_lshl_b32 s4, s20, 16
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
-; GFX11-NEXT: v_bfe_u32 v18, v12, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v10, v14, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-NEXT: s_and_b32 s4, s19, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v18, v12
-; GFX11-NEXT: v_bfe_u32 v16, v19, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v9
-; GFX11-NEXT: v_cndmask_b32_e32 v11, v11, v17, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s19, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v16, v19
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v12
-; GFX11-NEXT: v_add_f32_e64 v18, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v21, v17, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
-; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v19
-; GFX11-NEXT: s_and_b32 s4, s18, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v13, v16, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v16, v18, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v19, v21, v17
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v13
-; GFX11-NEXT: v_dual_cndmask_b32 v14, v14, v20 :: v_dual_add_nc_u32 v13, v16, v18
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v19
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v17
-; GFX11-NEXT: v_add_f32_e64 v20, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: s_lshl_b32 s4, s18, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
-; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v18
-; GFX11-NEXT: v_add_f32_e64 v22, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v16, v16, v19, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v17, v20, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v14
-; GFX11-NEXT: v_bfe_u32 v19, v22, 16, 1
-; GFX11-NEXT: s_and_b32 s4, s17, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v17, v17, v20
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v18, v13, v21 :: v_dual_and_b32 v13, 0xffff, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v16
-; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v18, v19, v22
-; GFX11-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
-; GFX11-NEXT: s_lshl_b32 s4, s17, 16
-; GFX11-NEXT: v_or_b32_e32 v23, 0x400000, v22
-; GFX11-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
-; GFX11-NEXT: v_bfe_u32 v24, v19, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v25, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v17, v21, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
-; GFX11-NEXT: s_and_b32 s4, s16, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v21, v24, v19
-; GFX11-NEXT: v_bfe_u32 v22, v25, 16, 1
-; GFX11-NEXT: v_dual_cndmask_b32 v18, v18, v23 :: v_dual_and_b32 v17, 0xffff, v16
-; GFX11-NEXT: v_add_f32_e64 v23, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s16, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v21, v22, v25
-; GFX11-NEXT: v_or_b32_e32 v22, 0x400000, v19
-; GFX11-NEXT: v_add_f32_e64 v24, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v27, v23, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v21
-; GFX11-NEXT: v_or_b32_e32 v26, 0x400000, v25
-; GFX11-NEXT: s_and_b32 s4, s3, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v20, v22, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v22, v24, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
-; GFX11-NEXT: v_add_nc_u32_e32 v25, v27, v23
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v20
-; GFX11-NEXT: v_dual_cndmask_b32 v21, v21, v26 :: v_dual_add_nc_u32 v20, v22, v24
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v22, 0x7fff, v25
-; GFX11-NEXT: v_or_b32_e32 v25, 0x400000, v23
-; GFX11-NEXT: v_add_f32_e64 v26, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
-; GFX11-NEXT: v_or_b32_e32 v27, 0x400000, v24
-; GFX11-NEXT: v_add_f32_e64 v28, 0x40c00000, s3
-; GFX11-NEXT: v_cndmask_b32_e32 v22, v22, v25, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v23, v26, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v21
-; GFX11-NEXT: v_bfe_u32 v25, v28, 16, 1
-; GFX11-NEXT: s_and_b32 s3, s2, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v24, v20, v27 :: v_dual_add_nc_u32 v23, v23, v26
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v22
-; GFX11-NEXT: v_or_b32_e32 v27, 0x400000, v26
-; GFX11-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v24, v25, v28
-; GFX11-NEXT: v_add_f32_e64 v25, 0x40c00000, s3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_or_b32_e32 v29, 0x400000, v28
-; GFX11-NEXT: v_add_nc_u32_e32 v24, 0x7fff, v24
-; GFX11-NEXT: v_bfe_u32 v30, v25, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v31, 0x40c00000, s2
-; GFX11-NEXT: v_cndmask_b32_e32 v26, v23, v27, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
-; GFX11-NEXT: s_and_b32 s2, s1, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v27, v30, v25
-; GFX11-NEXT: v_bfe_u32 v28, v31, 16, 1
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v24, v24, v29, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v29, 0x40c00000, s2
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v26
-; GFX11-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v27
-; GFX11-NEXT: v_add_nc_u32_e32 v27, v28, v31
-; GFX11-NEXT: v_or_b32_e32 v28, 0x400000, v25
-; GFX11-NEXT: v_add_f32_e64 v30, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v33, v29, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v27, 0x7fff, v27
-; GFX11-NEXT: v_or_b32_e32 v32, 0x400000, v31
-; GFX11-NEXT: s_and_b32 s1, s0, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v26, v26, v28, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v28, v30, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v33, v29
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v26
-; GFX11-NEXT: v_dual_cndmask_b32 v27, v27, v32 :: v_dual_add_nc_u32 v26, v28, v30
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v31
-; GFX11-NEXT: v_or_b32_e32 v31, 0x400000, v29
-; GFX11-NEXT: v_add_f32_e64 v32, 0x40c00000, s1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v26
-; GFX11-NEXT: v_or_b32_e32 v33, 0x400000, v30
-; GFX11-NEXT: v_add_f32_e64 v34, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v28, v28, v31, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v29, v32, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
-; GFX11-NEXT: v_or_b32_e32 v35, 0x400000, v32
-; GFX11-NEXT: v_bfe_u32 v31, v34, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v34
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v27
-; GFX11-NEXT: v_cndmask_b32_e32 v30, v26, v33, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v28
-; GFX11-NEXT: v_add_nc_u32_e32 v28, v29, v32
-; GFX11-NEXT: v_lshlrev_b32_e32 v33, 16, v178
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v30
-; GFX11-NEXT: v_add_nc_u32_e32 v30, v31, v34
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff0000, v178
-; GFX11-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v28
-; GFX11-NEXT: v_add_f32_e32 v33, 0x40c00000, v33
-; GFX11-NEXT: v_lshl_or_b32 v109, v5, 16, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v30, 0x7fff, v30
-; GFX11-NEXT: v_add_f32_e32 v31, 0x40c00000, v31
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v28, v35, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v37, v33, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v29
-; GFX11-NEXT: v_bfe_u32 v35, v31, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v32, v37, v33
-; GFX11-NEXT: v_cndmask_b32_e32 v30, v30, v36, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v179
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v35, v31
-; GFX11-NEXT: v_or_b32_e32 v37, 0x400000, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v179
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_or_b32_e32 v38, 0x400000, v31
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v32, v37, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v180
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v34, v38, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v34, v36, 16, 1
-; GFX11-NEXT: v_bfe_u32 v33, v35, 16, 1
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v180
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v33, v33, v35
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-NEXT: v_bfe_u32 v36, v37, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v30
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_bfe_u32 v35, v38, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v178, v31, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v36, v37
-; GFX11-NEXT: v_dual_cndmask_b32 v33, v33, v48 :: v_dual_lshlrev_b32 v36, 16, v182
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v33, v35, v38
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v182
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
-; GFX11-NEXT: v_lshl_or_b32 v179, v32, 16, v34
-; GFX11-NEXT: v_and_b32_e32 v30, 0xffff, v30
-; GFX11-NEXT: v_lshl_or_b32 v136, v2, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v33, v33, v39, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v38, v35, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_bfe_u32 v37, v36, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: v_dual_cndmask_b32 v31, v31, v48 :: v_dual_add_nc_u32 v38, v38, v35
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v32, v37, v36
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v181
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v181
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v32, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff, v33
-; GFX11-NEXT: v_bfe_u32 v35, v37, 16, 1
-; GFX11-NEXT: v_bfe_u32 v36, v38, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v180, v31, 16, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v33, v35, v37
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v170
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v36, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_lshlrev_b32 v36, 16, v170
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v37
-; GFX11-NEXT: v_bfe_u32 v38, v35, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v39, v36, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_lshl_or_b32 v182, v31, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v37, v38, v35
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v39, v36
-; GFX11-NEXT: v_cndmask_b32_e32 v33, v33, v48, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v38, 0x400000, v36
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v33
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v37
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v169
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v31, v38, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_lshlrev_b32_e32 v39, 16, v169
-; GFX11-NEXT: v_lshl_or_b32 v181, v32, 16, v33
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
-; GFX11-NEXT: v_and_b32_e32 v38, 0xffff0000, v176
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v39
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_bfe_u32 v35, v37, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v37
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v34
-; GFX11-NEXT: v_bfe_u32 v32, v36, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v35, v37
-; GFX11-NEXT: v_lshlrev_b32_e32 v35, 16, v176
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v32, v32, v36
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
-; GFX11-NEXT: v_bfe_u32 v37, v38, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v49, v35, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-NEXT: v_lshl_or_b32 v170, v33, 16, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v49, v35
-; GFX11-NEXT: v_dual_cndmask_b32 v32, v32, v48 :: v_dual_add_nc_u32 v33, v37, v38
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v174
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v36
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v35
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_dual_add_f32 v35, 0x40c00000, v37 :: v_dual_cndmask_b32 v34, v34, v36
-; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v174
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v37, v35, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_dual_add_f32 v36, 0x40c00000, v36 :: v_dual_cndmask_b32 v33, v33, v39
-; GFX11-NEXT: v_lshl_or_b32 v169, v31, 16, v32
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v37, v37, v35
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v31, v36, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff0000, v171
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v177
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v31, v36
-; GFX11-NEXT: v_lshl_or_b32 v176, v33, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v37
-; GFX11-NEXT: v_or_b32_e32 v34, 0x400000, v35
-; GFX11-NEXT: v_dual_add_f32 v32, 0x40c00000, v32 :: v_dual_lshlrev_b32 v37, 16, v171
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v33, v33, v34, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v34, 0x400000, v36
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v37
-; GFX11-NEXT: v_bfe_u32 v37, v32, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v32
-; GFX11-NEXT: v_bfe_u32 v50, v38, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_dual_cndmask_b32 v31, v31, v34 :: v_dual_add_nc_u32 v36, v37, v32
-; GFX11-NEXT: v_bfe_u32 v34, v35, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v177
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v35
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v36, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_bfe_u32 v49, v37, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v50, v38
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v50, 16, v184
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v34, v34, v48 :: v_dual_add_nc_u32 v35, v49, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_and_b32_e32 v48, 0xffff0000, v184
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v35, 0x7fff, v35
-; GFX11-NEXT: v_or_b32_e32 v49, 0x400000, v37
-; GFX11-NEXT: v_cndmask_b32_e32 v36, v36, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v50
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_dual_add_f32 v38, 0x40c00000, v48 :: v_dual_cndmask_b32 v35, v35, v49
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v36
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: v_bfe_u32 v48, v37, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v39, v38, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v35
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v36
-; GFX11-NEXT: v_lshl_or_b32 v174, v33, 16, v31
-; GFX11-NEXT: v_lshl_or_b32 v171, v32, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v48, v37
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff0000, v175
-; GFX11-NEXT: v_lshlrev_b32_e32 v34, 16, v175
-; GFX11-NEXT: v_add_nc_u32_e32 v39, v39, v38
-; GFX11-NEXT: v_lshl_or_b32 v177, v35, 16, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_or_b32_e32 v35, 0x400000, v37
-; GFX11-NEXT: v_dual_add_f32 v33, 0x40c00000, v33 :: v_dual_add_f32 v34, 0x40c00000, v34
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v39
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v37, v33, 16, 1
-; GFX11-NEXT: v_bfe_u32 v39, v34, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v31, v35, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v173
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v48, 16, v173
-; GFX11-NEXT: v_or_b32_e32 v49, 0x400000, v33
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_cndmask_b32 v32, v32, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v37, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v37, v39, v34
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v38, v35, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v37
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v38, v38, v35
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_lshl_or_b32 v122, v3, 16, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v37, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v38
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v48
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v35
-; GFX11-NEXT: v_dual_cndmask_b32 v33, v36, v49 :: v_dual_lshlrev_b32 v48, 16, v183
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v36, v38, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v49, 0x400000, v38
-; GFX11-NEXT: v_add_f32_e32 v48, 0x40c00000, v48
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_cndmask_b32_e32 v35, v37, v39, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v172
-; GFX11-NEXT: v_lshlrev_b32_e32 v39, 16, v172
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v36, v38
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_or_b32_e32 v55, 0x400000, v48
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
-; GFX11-NEXT: v_add_f32_e32 v39, 0x40c00000, v39
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: v_bfe_u32 v50, v37, 16, 1
-; GFX11-NEXT: v_bfe_u32 v38, v39, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v36, v36, v49, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v54, 0x400000, v39
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39
-; GFX11-NEXT: v_dual_add_f32 v50, 0x40c00000, v51 :: v_dual_add_nc_u32 v49, v50, v37
-; GFX11-NEXT: v_bfe_u32 v51, v48, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v38, v38, v39
-; GFX11-NEXT: v_or_b32_e32 v53, 0x400000, v37
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v49, 0x7fff, v49
-; GFX11-NEXT: v_bfe_u32 v52, v50, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v51, v51, v48
-; GFX11-NEXT: v_add_nc_u32_e32 v38, 0x7fff, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v52, v52, v50
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v38, v38, v54 :: v_dual_add_nc_u32 v51, 0x7fff, v51
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v39, 0x7fff, v52
-; GFX11-NEXT: v_or_b32_e32 v52, 0x400000, v50
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v38
-; GFX11-NEXT: v_cndmask_b32_e32 v48, v51, v55, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_lshl_or_b32 v184, v32, 16, v31
-; GFX11-NEXT: v_lshl_or_b32 v175, v33, 16, v34
-; GFX11-NEXT: v_and_b32_e32 v38, 0xffff, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v48
-; GFX11-NEXT: v_cndmask_b32_e32 v37, v49, v53, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50
-; GFX11-NEXT: v_lshl_or_b32 v173, v35, 16, v36
-; GFX11-NEXT: v_lshl_or_b32 v97, v8, 16, v10
-; GFX11-NEXT: v_and_b32_e32 v48, 0xffff, v48
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v37
-; GFX11-NEXT: v_cndmask_b32_e32 v39, v39, v52, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v86, v9, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v76, v11, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v67, v14, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v172, v37, 16, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v39
-; GFX11-NEXT: v_lshl_or_b32 v59, v16, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v52, v18, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v46, v21, 16, v23
-; GFX11-NEXT: v_lshl_or_b32 v41, v22, 16, v25
-; GFX11-NEXT: v_lshl_or_b32 v183, v39, 16, v48
-; GFX11-NEXT: v_lshl_or_b32 v37, v24, 16, v27
-; GFX11-NEXT: v_lshl_or_b32 v34, v26, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v32, v29, 16, v30
-; GFX11-NEXT: .LBB19_3: ; %end
-; GFX11-NEXT: v_dual_mov_b32 v3, v41 :: v_dual_mov_b32 v4, v46
-; GFX11-NEXT: v_dual_mov_b32 v6, v59 :: v_dual_mov_b32 v9, v86
-; GFX11-NEXT: v_dual_mov_b32 v7, v67 :: v_dual_mov_b32 v8, v76
-; GFX11-NEXT: v_dual_mov_b32 v10, v97 :: v_dual_mov_b32 v13, v136
-; GFX11-NEXT: v_dual_mov_b32 v11, v109 :: v_dual_mov_b32 v12, v122
-; GFX11-NEXT: v_dual_mov_b32 v14, v151 :: v_dual_mov_b32 v17, v172
-; GFX11-NEXT: v_dual_mov_b32 v18, v173 :: v_dual_mov_b32 v19, v175
-; GFX11-NEXT: v_dual_mov_b32 v20, v184 :: v_dual_mov_b32 v23, v174
-; GFX11-NEXT: v_dual_mov_b32 v22, v171 :: v_dual_mov_b32 v25, v169
-; GFX11-NEXT: v_dual_mov_b32 v26, v170 :: v_dual_mov_b32 v29, v180
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v184, off, s32
-; GFX11-NEXT: scratch_load_b32 v175, off, s32 offset:4
-; GFX11-NEXT: scratch_load_b32 v174, off, s32 offset:8
-; GFX11-NEXT: scratch_load_b32 v173, off, s32 offset:12
-; GFX11-NEXT: scratch_load_b32 v172, off, s32 offset:16
-; GFX11-NEXT: scratch_load_b32 v171, off, s32 offset:20
-; GFX11-NEXT: scratch_load_b32 v170, off, s32 offset:24
-; GFX11-NEXT: scratch_load_b32 v169, off, s32 offset:28
-; GFX11-NEXT: scratch_load_b32 v168, off, s32 offset:32
-; GFX11-NEXT: scratch_load_b32 v159, off, s32 offset:36
-; GFX11-NEXT: scratch_load_b32 v158, off, s32 offset:40
-; GFX11-NEXT: scratch_load_b32 v157, off, s32 offset:44
-; GFX11-NEXT: scratch_load_b32 v156, off, s32 offset:48
-; GFX11-NEXT: scratch_load_b32 v155, off, s32 offset:52
-; GFX11-NEXT: scratch_load_b32 v154, off, s32 offset:56
-; GFX11-NEXT: scratch_load_b32 v153, off, s32 offset:60
-; GFX11-NEXT: scratch_load_b32 v152, off, s32 offset:64
-; GFX11-NEXT: scratch_load_b32 v143, off, s32 offset:68
-; GFX11-NEXT: scratch_load_b32 v142, off, s32 offset:72
-; GFX11-NEXT: scratch_load_b32 v141, off, s32 offset:76
-; GFX11-NEXT: scratch_load_b32 v140, off, s32 offset:80
-; GFX11-NEXT: scratch_load_b32 v139, off, s32 offset:84
-; GFX11-NEXT: scratch_load_b32 v138, off, s32 offset:88
-; GFX11-NEXT: scratch_load_b32 v137, off, s32 offset:92
-; GFX11-NEXT: scratch_load_b32 v136, off, s32 offset:96
-; GFX11-NEXT: scratch_load_b32 v127, off, s32 offset:100
-; GFX11-NEXT: scratch_load_b32 v126, off, s32 offset:104
-; GFX11-NEXT: scratch_load_b32 v125, off, s32 offset:108
-; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:112
-; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:116
-; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:120
-; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:124
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:128
-; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:132
-; GFX11-NEXT: scratch_load_b32 v110, off, s32 offset:136
-; GFX11-NEXT: scratch_load_b32 v109, off, s32 offset:140
-; GFX11-NEXT: scratch_load_b32 v108, off, s32 offset:144
-; GFX11-NEXT: scratch_load_b32 v107, off, s32 offset:148
-; GFX11-NEXT: scratch_load_b32 v106, off, s32 offset:152
-; GFX11-NEXT: scratch_load_b32 v105, off, s32 offset:156
-; GFX11-NEXT: scratch_load_b32 v104, off, s32 offset:160
-; GFX11-NEXT: scratch_load_b32 v95, off, s32 offset:164
-; GFX11-NEXT: scratch_load_b32 v94, off, s32 offset:168
-; GFX11-NEXT: scratch_load_b32 v93, off, s32 offset:172
-; GFX11-NEXT: scratch_load_b32 v92, off, s32 offset:176
-; GFX11-NEXT: scratch_load_b32 v91, off, s32 offset:180
-; GFX11-NEXT: scratch_load_b32 v90, off, s32 offset:184
-; GFX11-NEXT: scratch_load_b32 v89, off, s32 offset:188
-; GFX11-NEXT: scratch_load_b32 v88, off, s32 offset:192
-; GFX11-NEXT: scratch_load_b32 v79, off, s32 offset:196
-; GFX11-NEXT: scratch_load_b32 v78, off, s32 offset:200
-; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:204
-; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:208
-; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:212
-; GFX11-NEXT: scratch_load_b32 v74, off, s32 offset:216
-; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:220
-; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:224
-; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:228
-; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:232
-; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:236
-; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:240
-; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:244
-; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:248
-; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:252
-; GFX11-NEXT: s_clause 0x8
-; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:256
-; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:260
-; GFX11-NEXT: scratch_load_b32 v46, off, s32 offset:264
-; GFX11-NEXT: scratch_load_b32 v45, off, s32 offset:268
-; GFX11-NEXT: scratch_load_b32 v44, off, s32 offset:272
-; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:276
-; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:280
-; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:284
-; GFX11-NEXT: scratch_load_b32 v40, off, s32 offset:288
-; GFX11-NEXT: v_dual_mov_b32 v0, v32 :: v_dual_mov_b32 v1, v34
-; GFX11-NEXT: v_dual_mov_b32 v2, v37 :: v_dual_mov_b32 v5, v52
-; GFX11-NEXT: v_dual_mov_b32 v16, v183 :: v_dual_mov_b32 v21, v177
-; GFX11-NEXT: v_dual_mov_b32 v24, v176 :: v_dual_mov_b32 v27, v181
-; GFX11-NEXT: v_mov_b32_e32 v28, v182
-; GFX11-NEXT: v_dual_mov_b32 v30, v179 :: v_dual_mov_b32 v31, v178
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB19_4:
-; GFX11-NEXT: ; implicit-def: $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
-; GFX11-NEXT: ; implicit-def: $vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73
-; GFX11-NEXT: ; implicit-def: $vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78
-; GFX11-NEXT: ; implicit-def: $vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84
-; GFX11-NEXT: ; implicit-def: $vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91
-; GFX11-NEXT: ; implicit-def: $vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99
-; GFX11-NEXT: ; implicit-def: $vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108
-; GFX11-NEXT: ; implicit-def: $vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118
-; GFX11-NEXT: ; implicit-def: $vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129
-; GFX11-NEXT: ; implicit-def: $vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141
-; GFX11-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
-; GFX11-NEXT: ; implicit-def: $vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168
-; GFX11-NEXT: s_branch .LBB19_2
+; GFX11-TRUE16-LABEL: bitcast_v64bf16_to_v32i32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:156
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:28
+; GFX11-TRUE16-NEXT: s_clause 0x6
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v167, v13 :: v_dual_mov_b32 v176, v12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v177, v11 :: v_dual_mov_b32 v178, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v179, v9 :: v_dual_mov_b32 v180, v8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v181, v7 :: v_dual_mov_b32 v182, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v183, v5 :: v_dual_mov_b32 v168, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v169, v3 :: v_dual_mov_b32 v170, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v171, v1 :: v_dual_mov_b32 v172, v0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v174, s28 :: v_dual_mov_b32 v173, s29
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB19_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v135, s0 :: v_dual_mov_b32 v134, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v132, s2 :: v_dual_mov_b32 v129, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v125, s16 :: v_dual_mov_b32 v120, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v114, s18 :: v_dual_mov_b32 v107, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v99, s20 :: v_dual_mov_b32 v90, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v80, s22 :: v_dual_mov_b32 v69, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v57, s24 :: v_dual_mov_b32 v44, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB19_3
+; GFX11-TRUE16-NEXT: .LBB19_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s27, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s27, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s26, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s26, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s25, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s25, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s24, 0xffff0000
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v8 :: v_dual_add_nc_u32 v7, v7, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s24, 16
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v2, v7, v2 :: v_dual_add_nc_u32 v7, v8, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v9, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v3, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v1.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 16, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v44.h, v4.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s23, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v57.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s23, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s22, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s22, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s21, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v80.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s21, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s20, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v90.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s20, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v99.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s19, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v107.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v114.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s17, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v120.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s16, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v125, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v125.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v132.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v135.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v167
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v167
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v167, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v167.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v176
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v176
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v176, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v176.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v177
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v177
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v177, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v177.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v178
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v178
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v178, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v178.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v179
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v179
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v179, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v179.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v180
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v180
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v180, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v180.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v181
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v181
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v181.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v182
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v182
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v182.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v183
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v183
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v183, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v183.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v168
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v168
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v168, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v168.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v169
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v169
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v169, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v169.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v170
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v170
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v170, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v170.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v171
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v171
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v171, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v171.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v172
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v172
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v172, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v172.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v173
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v173
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v173, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v173.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v174
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v174
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v174, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v174.h, v0.l
+; GFX11-TRUE16-NEXT: .LBB19_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, v125 :: v_dual_mov_b32 v5, v120
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v114 :: v_dual_mov_b32 v7, v107
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, v99 :: v_dual_mov_b32 v9, v90
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, v57 :: v_dual_mov_b32 v13, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v30 :: v_dual_mov_b32 v17, v173
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v174 :: v_dual_mov_b32 v19, v171
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v172 :: v_dual_mov_b32 v21, v169
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v170 :: v_dual_mov_b32 v23, v183
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v168 :: v_dual_mov_b32 v25, v181
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0x6
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:280
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v135 :: v_dual_mov_b32 v1, v134
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, v132 :: v_dual_mov_b32 v3, v129
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, v80 :: v_dual_mov_b32 v11, v69
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v182 :: v_dual_mov_b32 v27, v179
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v180 :: v_dual_mov_b32 v29, v177
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v178 :: v_dual_mov_b32 v31, v167
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v30, v176
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB19_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166
+; GFX11-TRUE16-NEXT: s_branch .LBB19_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v64bf16_to_v32i32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:288
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:284
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:280
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v43, s32 offset:276
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v44, s32 offset:272
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v45, s32 offset:268
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v46, s32 offset:264
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v47, s32 offset:260
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v56, s32 offset:256
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v57, s32 offset:252
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v58, s32 offset:248
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v59, s32 offset:244
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v60, s32 offset:240
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v61, s32 offset:236
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v62, s32 offset:232
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v63, s32 offset:228
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v72, s32 offset:224
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v73, s32 offset:220
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v74, s32 offset:216
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v75, s32 offset:212
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v76, s32 offset:208
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v77, s32 offset:204
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v78, s32 offset:200
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v79, s32 offset:196
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v88, s32 offset:192
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v89, s32 offset:188
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v90, s32 offset:184
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v91, s32 offset:180
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v92, s32 offset:176
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v93, s32 offset:172
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v94, s32 offset:168
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v95, s32 offset:164
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v104, s32 offset:160
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v105, s32 offset:156
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v106, s32 offset:152
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v107, s32 offset:148
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v108, s32 offset:144
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v109, s32 offset:140
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v110, s32 offset:136
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v111, s32 offset:132
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v120, s32 offset:128
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v121, s32 offset:124
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v122, s32 offset:120
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v123, s32 offset:116
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v124, s32 offset:112
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v125, s32 offset:108
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v126, s32 offset:104
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v127, s32 offset:100
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v136, s32 offset:96
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v137, s32 offset:92
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v138, s32 offset:88
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v139, s32 offset:84
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v140, s32 offset:80
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v141, s32 offset:76
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v142, s32 offset:72
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v143, s32 offset:68
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v152, s32 offset:64
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v153, s32 offset:60
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v154, s32 offset:56
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v155, s32 offset:52
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v156, s32 offset:48
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v157, s32 offset:44
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v158, s32 offset:40
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v159, s32 offset:36
+; GFX11-FAKE16-NEXT: s_clause 0x8
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v168, s32 offset:32
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v169, s32 offset:28
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v170, s32 offset:24
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v171, s32 offset:20
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v172, s32 offset:16
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v173, s32 offset:12
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v174, s32 offset:8
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v175, s32 offset:4
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v184, s32
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v178, v13 :: v_dual_mov_b32 v179, v12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v180, v11 :: v_dual_mov_b32 v181, v9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v182, v10 :: v_dual_mov_b32 v169, v7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v170, v8 :: v_dual_mov_b32 v177, v3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v176, v6 :: v_dual_mov_b32 v171, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v174, v5 :: v_dual_mov_b32 v173, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v184, v2 :: v_dual_mov_b32 v175, v1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v183, s28 :: v_dual_mov_b32 v172, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB19_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s0 :: v_dual_mov_b32 v37, s2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s1 :: v_dual_mov_b32 v41, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v46, s16 :: v_dual_mov_b32 v59, s18
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v52, s17 :: v_dual_mov_b32 v67, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v76, s20 :: v_dual_mov_b32 v97, s22
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v86, s21 :: v_dual_mov_b32 v109, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v122, s24 :: v_dual_mov_b32 v151, s26
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v136, s25 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB19_3
+; GFX11-FAKE16-NEXT: .LBB19_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s27, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s27, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s26, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s26, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v3, 16, 1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s25, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s25, 0xffff0000
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s24, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v51, 0xffff0000, v183
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v9, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v3, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v10, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v7, v9, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s24, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s23, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v6, v1, v7 :: v_dual_and_b32 v1, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v7, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s23, 16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v151, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s22, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v11, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v6, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s22, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v9, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v14, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s21, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v6, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v14, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v7
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v8, v8, v13 :: v_dual_add_nc_u32 v7, v9, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s21, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v9, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v16, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s20, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v11, v7, v14 :: v_dual_add_nc_u32 v10, v10, v13
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v12, v16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s20, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_bfe_u32 v18, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v10, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v18, v12
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v19, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v11, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s19, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v16, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v21, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v19
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v13, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v18, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, v21, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v13
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v14, v14, v20 :: v_dual_add_nc_u32 v13, v16, v18
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v17
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v20, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s18, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v18
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v22, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v16, v16, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v17, v20, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v14
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v22, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, v17, v20
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v18, v13, v21 :: v_dual_and_b32 v13, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, v19, v22
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s17, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v23, 0x400000, v22
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
+; GFX11-FAKE16-NEXT: v_bfe_u32 v24, v19, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v25, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v17, v21, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, v24, v19
+; GFX11-FAKE16-NEXT: v_bfe_u32 v22, v25, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v18, v18, v23 :: v_dual_and_b32 v17, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v23, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s16, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, v22, v25
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v22, 0x400000, v19
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v24, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v27, v23, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v21
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v26, 0x400000, v25
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v20, v22, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v22, v24, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v25, v27, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v20
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v21, v21, v26 :: v_dual_add_nc_u32 v20, v22, v24
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v22, 0x7fff, v25
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v23
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v26, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v24
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v28, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v22, v22, v25, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v23, v26, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v21
+; GFX11-FAKE16-NEXT: v_bfe_u32 v25, v28, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v24, v20, v27 :: v_dual_add_nc_u32 v23, v23, v26
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v22
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v26
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, v25, v28
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v25, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v29, 0x400000, v28
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, 0x7fff, v24
+; GFX11-FAKE16-NEXT: v_bfe_u32 v30, v25, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v31, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v26, v23, v27, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, v30, v25
+; GFX11-FAKE16-NEXT: v_bfe_u32 v28, v31, 16, 1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v24, v24, v29, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v29, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v26
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v27
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, v28, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v28, 0x400000, v25
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v30, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v33, v29, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, 0x7fff, v27
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v32, 0x400000, v31
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v26, v26, v28, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v28, v30, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v33, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v26
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v27, v27, v32 :: v_dual_add_nc_u32 v26, v28, v30
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v31, 0x400000, v29
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v32, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v26
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v33, 0x400000, v30
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v34, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v28, v28, v31, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v29, v32, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v35, 0x400000, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v31, v34, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v34
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v27
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v30, v26, v33, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v28
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, v29, v32
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v33, 16, v178
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v30
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v30, v31, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff0000, v178
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v28
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v33, 0x40c00000, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v109, v5, 16, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v30, 0x7fff, v30
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v31, 0x40c00000, v31
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v28, v35, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v33, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v29
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v31, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, v37, v33
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v30, v30, v36, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v36, 16, v179
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v35, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v37, 0x400000, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v179
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v38, 0x400000, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v32, v37, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v180
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v34, v38, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v34, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v33, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v38, 16, v180
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, v33, v35
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v36, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v30
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v178, v31, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v36, v37
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v33, v33, v48 :: v_dual_lshlrev_b32 v36, 16, v182
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, v35, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v182
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v179, v32, 16, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v30, 0xffff, v30
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v136, v2, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v33, v33, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v31, v31, v48 :: v_dual_add_nc_u32 v38, v38, v35
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, v37, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v181
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v38
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v38, 16, v181
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v32, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v33
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v36, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v180, v31, 16, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, v35, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v170
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v36, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_lshlrev_b32 v36, 16, v170
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v39, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v182, v31, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, v38, v35
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v39, v36
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v33, v33, v48, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v38, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v169
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v31, v38, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v39, 16, v169
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v181, v32, 16, v33
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff0000, v176
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v39
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v32, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v35, v37
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v35, 16, v176
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, v32, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v49, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v170, v33, 16, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v49, v35
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v32, v32, v48 :: v_dual_add_nc_u32 v33, v37, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v174
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v35, 0x40c00000, v37 :: v_dual_cndmask_b32 v34, v34, v36
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v36, 16, v174
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v36, 0x40c00000, v36 :: v_dual_cndmask_b32 v33, v33, v39
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v169, v31, 16, v32
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, v37, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v31, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v171
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v38, 16, v177
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v31, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v176, v33, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v37
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v34, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v32, 0x40c00000, v32 :: v_dual_lshlrev_b32 v37, 16, v171
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v33, v33, v34, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v34, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v32, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v50, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v31, v31, v34 :: v_dual_add_nc_u32 v36, v37, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v34, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v177
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v35
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v36, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v49, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v50, v38
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v50, 16, v184
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v34, v34, v48 :: v_dual_add_nc_u32 v35, v49, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff0000, v184
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v35, 0x7fff, v35
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v49, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v36, v36, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v50
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v38, 0x40c00000, v48 :: v_dual_cndmask_b32 v35, v35, v49
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v48, v37, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v39, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v174, v33, 16, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v171, v32, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v48, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff0000, v175
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v34, 16, v175
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v39, v39, v38
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v177, v35, 16, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v35, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v33, 0x40c00000, v33 :: v_dual_add_f32 v34, 0x40c00000, v34
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v39
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v33, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v39, v34, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v31, v35, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v173
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v48, 16, v173
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v49, 0x400000, v33
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_cndmask_b32 v32, v32, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v37, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, v39, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v37
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v38, v38, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v122, v3, 16, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v37, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v38
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v48
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v33, v36, v49 :: v_dual_lshlrev_b32 v48, 16, v183
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v36, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v49, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v48, 0x40c00000, v48
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v35, v37, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v172
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v39, 16, v172
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v36, v38
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v55, 0x400000, v48
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v39, 0x40c00000, v39
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v50, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v39, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v36, v36, v49, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v54, 0x400000, v39
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v50, 0x40c00000, v51 :: v_dual_add_nc_u32 v49, v50, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v51, v48, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v38, v38, v39
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v53, 0x400000, v37
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v49, 0x7fff, v49
+; GFX11-FAKE16-NEXT: v_bfe_u32 v52, v50, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v51, v51, v48
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v38, 0x7fff, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v52, v52, v50
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v38, v38, v54 :: v_dual_add_nc_u32 v51, 0x7fff, v51
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v39, 0x7fff, v52
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v52, 0x400000, v50
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v38
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v48, v51, v55, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v184, v32, 16, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v175, v33, 16, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v48
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v37, v49, v53, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v173, v35, 16, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v97, v8, 16, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v48
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v37
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v39, v39, v52, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v86, v9, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v76, v11, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v67, v14, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v172, v37, 16, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v39
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v59, v16, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v52, v18, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v46, v21, 16, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v41, v22, 16, v25
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v183, v39, 16, v48
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v37, v24, 16, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v34, v26, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v29, 16, v30
+; GFX11-FAKE16-NEXT: .LBB19_3: ; %end
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v41 :: v_dual_mov_b32 v4, v46
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v59 :: v_dual_mov_b32 v9, v86
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, v67 :: v_dual_mov_b32 v8, v76
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, v97 :: v_dual_mov_b32 v13, v136
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, v109 :: v_dual_mov_b32 v12, v122
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, v151 :: v_dual_mov_b32 v17, v172
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, v173 :: v_dual_mov_b32 v19, v175
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, v184 :: v_dual_mov_b32 v23, v174
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, v171 :: v_dual_mov_b32 v25, v169
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, v170 :: v_dual_mov_b32 v29, v180
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_load_b32 v184, off, s32
+; GFX11-FAKE16-NEXT: scratch_load_b32 v175, off, s32 offset:4
+; GFX11-FAKE16-NEXT: scratch_load_b32 v174, off, s32 offset:8
+; GFX11-FAKE16-NEXT: scratch_load_b32 v173, off, s32 offset:12
+; GFX11-FAKE16-NEXT: scratch_load_b32 v172, off, s32 offset:16
+; GFX11-FAKE16-NEXT: scratch_load_b32 v171, off, s32 offset:20
+; GFX11-FAKE16-NEXT: scratch_load_b32 v170, off, s32 offset:24
+; GFX11-FAKE16-NEXT: scratch_load_b32 v169, off, s32 offset:28
+; GFX11-FAKE16-NEXT: scratch_load_b32 v168, off, s32 offset:32
+; GFX11-FAKE16-NEXT: scratch_load_b32 v159, off, s32 offset:36
+; GFX11-FAKE16-NEXT: scratch_load_b32 v158, off, s32 offset:40
+; GFX11-FAKE16-NEXT: scratch_load_b32 v157, off, s32 offset:44
+; GFX11-FAKE16-NEXT: scratch_load_b32 v156, off, s32 offset:48
+; GFX11-FAKE16-NEXT: scratch_load_b32 v155, off, s32 offset:52
+; GFX11-FAKE16-NEXT: scratch_load_b32 v154, off, s32 offset:56
+; GFX11-FAKE16-NEXT: scratch_load_b32 v153, off, s32 offset:60
+; GFX11-FAKE16-NEXT: scratch_load_b32 v152, off, s32 offset:64
+; GFX11-FAKE16-NEXT: scratch_load_b32 v143, off, s32 offset:68
+; GFX11-FAKE16-NEXT: scratch_load_b32 v142, off, s32 offset:72
+; GFX11-FAKE16-NEXT: scratch_load_b32 v141, off, s32 offset:76
+; GFX11-FAKE16-NEXT: scratch_load_b32 v140, off, s32 offset:80
+; GFX11-FAKE16-NEXT: scratch_load_b32 v139, off, s32 offset:84
+; GFX11-FAKE16-NEXT: scratch_load_b32 v138, off, s32 offset:88
+; GFX11-FAKE16-NEXT: scratch_load_b32 v137, off, s32 offset:92
+; GFX11-FAKE16-NEXT: scratch_load_b32 v136, off, s32 offset:96
+; GFX11-FAKE16-NEXT: scratch_load_b32 v127, off, s32 offset:100
+; GFX11-FAKE16-NEXT: scratch_load_b32 v126, off, s32 offset:104
+; GFX11-FAKE16-NEXT: scratch_load_b32 v125, off, s32 offset:108
+; GFX11-FAKE16-NEXT: scratch_load_b32 v124, off, s32 offset:112
+; GFX11-FAKE16-NEXT: scratch_load_b32 v123, off, s32 offset:116
+; GFX11-FAKE16-NEXT: scratch_load_b32 v122, off, s32 offset:120
+; GFX11-FAKE16-NEXT: scratch_load_b32 v121, off, s32 offset:124
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_load_b32 v120, off, s32 offset:128
+; GFX11-FAKE16-NEXT: scratch_load_b32 v111, off, s32 offset:132
+; GFX11-FAKE16-NEXT: scratch_load_b32 v110, off, s32 offset:136
+; GFX11-FAKE16-NEXT: scratch_load_b32 v109, off, s32 offset:140
+; GFX11-FAKE16-NEXT: scratch_load_b32 v108, off, s32 offset:144
+; GFX11-FAKE16-NEXT: scratch_load_b32 v107, off, s32 offset:148
+; GFX11-FAKE16-NEXT: scratch_load_b32 v106, off, s32 offset:152
+; GFX11-FAKE16-NEXT: scratch_load_b32 v105, off, s32 offset:156
+; GFX11-FAKE16-NEXT: scratch_load_b32 v104, off, s32 offset:160
+; GFX11-FAKE16-NEXT: scratch_load_b32 v95, off, s32 offset:164
+; GFX11-FAKE16-NEXT: scratch_load_b32 v94, off, s32 offset:168
+; GFX11-FAKE16-NEXT: scratch_load_b32 v93, off, s32 offset:172
+; GFX11-FAKE16-NEXT: scratch_load_b32 v92, off, s32 offset:176
+; GFX11-FAKE16-NEXT: scratch_load_b32 v91, off, s32 offset:180
+; GFX11-FAKE16-NEXT: scratch_load_b32 v90, off, s32 offset:184
+; GFX11-FAKE16-NEXT: scratch_load_b32 v89, off, s32 offset:188
+; GFX11-FAKE16-NEXT: scratch_load_b32 v88, off, s32 offset:192
+; GFX11-FAKE16-NEXT: scratch_load_b32 v79, off, s32 offset:196
+; GFX11-FAKE16-NEXT: scratch_load_b32 v78, off, s32 offset:200
+; GFX11-FAKE16-NEXT: scratch_load_b32 v77, off, s32 offset:204
+; GFX11-FAKE16-NEXT: scratch_load_b32 v76, off, s32 offset:208
+; GFX11-FAKE16-NEXT: scratch_load_b32 v75, off, s32 offset:212
+; GFX11-FAKE16-NEXT: scratch_load_b32 v74, off, s32 offset:216
+; GFX11-FAKE16-NEXT: scratch_load_b32 v73, off, s32 offset:220
+; GFX11-FAKE16-NEXT: scratch_load_b32 v72, off, s32 offset:224
+; GFX11-FAKE16-NEXT: scratch_load_b32 v63, off, s32 offset:228
+; GFX11-FAKE16-NEXT: scratch_load_b32 v62, off, s32 offset:232
+; GFX11-FAKE16-NEXT: scratch_load_b32 v61, off, s32 offset:236
+; GFX11-FAKE16-NEXT: scratch_load_b32 v60, off, s32 offset:240
+; GFX11-FAKE16-NEXT: scratch_load_b32 v59, off, s32 offset:244
+; GFX11-FAKE16-NEXT: scratch_load_b32 v58, off, s32 offset:248
+; GFX11-FAKE16-NEXT: scratch_load_b32 v57, off, s32 offset:252
+; GFX11-FAKE16-NEXT: s_clause 0x8
+; GFX11-FAKE16-NEXT: scratch_load_b32 v56, off, s32 offset:256
+; GFX11-FAKE16-NEXT: scratch_load_b32 v47, off, s32 offset:260
+; GFX11-FAKE16-NEXT: scratch_load_b32 v46, off, s32 offset:264
+; GFX11-FAKE16-NEXT: scratch_load_b32 v45, off, s32 offset:268
+; GFX11-FAKE16-NEXT: scratch_load_b32 v44, off, s32 offset:272
+; GFX11-FAKE16-NEXT: scratch_load_b32 v43, off, s32 offset:276
+; GFX11-FAKE16-NEXT: scratch_load_b32 v42, off, s32 offset:280
+; GFX11-FAKE16-NEXT: scratch_load_b32 v41, off, s32 offset:284
+; GFX11-FAKE16-NEXT: scratch_load_b32 v40, off, s32 offset:288
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v32 :: v_dual_mov_b32 v1, v34
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v37 :: v_dual_mov_b32 v5, v52
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, v183 :: v_dual_mov_b32 v21, v177
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, v176 :: v_dual_mov_b32 v27, v181
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v28, v182
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, v179 :: v_dual_mov_b32 v31, v178
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB19_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168
+; GFX11-FAKE16-NEXT: s_branch .LBB19_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -66313,870 +67287,1844 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: s_branch .LBB43_2
;
-; GFX11-LABEL: bitcast_v64bf16_to_v32f32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:288
-; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:284
-; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:280
-; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:276
-; GFX11-NEXT: scratch_store_b32 off, v44, s32 offset:272
-; GFX11-NEXT: scratch_store_b32 off, v45, s32 offset:268
-; GFX11-NEXT: scratch_store_b32 off, v46, s32 offset:264
-; GFX11-NEXT: scratch_store_b32 off, v47, s32 offset:260
-; GFX11-NEXT: scratch_store_b32 off, v56, s32 offset:256
-; GFX11-NEXT: scratch_store_b32 off, v57, s32 offset:252
-; GFX11-NEXT: scratch_store_b32 off, v58, s32 offset:248
-; GFX11-NEXT: scratch_store_b32 off, v59, s32 offset:244
-; GFX11-NEXT: scratch_store_b32 off, v60, s32 offset:240
-; GFX11-NEXT: scratch_store_b32 off, v61, s32 offset:236
-; GFX11-NEXT: scratch_store_b32 off, v62, s32 offset:232
-; GFX11-NEXT: scratch_store_b32 off, v63, s32 offset:228
-; GFX11-NEXT: scratch_store_b32 off, v72, s32 offset:224
-; GFX11-NEXT: scratch_store_b32 off, v73, s32 offset:220
-; GFX11-NEXT: scratch_store_b32 off, v74, s32 offset:216
-; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:212
-; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:208
-; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:204
-; GFX11-NEXT: scratch_store_b32 off, v78, s32 offset:200
-; GFX11-NEXT: scratch_store_b32 off, v79, s32 offset:196
-; GFX11-NEXT: scratch_store_b32 off, v88, s32 offset:192
-; GFX11-NEXT: scratch_store_b32 off, v89, s32 offset:188
-; GFX11-NEXT: scratch_store_b32 off, v90, s32 offset:184
-; GFX11-NEXT: scratch_store_b32 off, v91, s32 offset:180
-; GFX11-NEXT: scratch_store_b32 off, v92, s32 offset:176
-; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:172
-; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:168
-; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:164
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:160
-; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:156
-; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:152
-; GFX11-NEXT: scratch_store_b32 off, v107, s32 offset:148
-; GFX11-NEXT: scratch_store_b32 off, v108, s32 offset:144
-; GFX11-NEXT: scratch_store_b32 off, v109, s32 offset:140
-; GFX11-NEXT: scratch_store_b32 off, v110, s32 offset:136
-; GFX11-NEXT: scratch_store_b32 off, v111, s32 offset:132
-; GFX11-NEXT: scratch_store_b32 off, v120, s32 offset:128
-; GFX11-NEXT: scratch_store_b32 off, v121, s32 offset:124
-; GFX11-NEXT: scratch_store_b32 off, v122, s32 offset:120
-; GFX11-NEXT: scratch_store_b32 off, v123, s32 offset:116
-; GFX11-NEXT: scratch_store_b32 off, v124, s32 offset:112
-; GFX11-NEXT: scratch_store_b32 off, v125, s32 offset:108
-; GFX11-NEXT: scratch_store_b32 off, v126, s32 offset:104
-; GFX11-NEXT: scratch_store_b32 off, v127, s32 offset:100
-; GFX11-NEXT: scratch_store_b32 off, v136, s32 offset:96
-; GFX11-NEXT: scratch_store_b32 off, v137, s32 offset:92
-; GFX11-NEXT: scratch_store_b32 off, v138, s32 offset:88
-; GFX11-NEXT: scratch_store_b32 off, v139, s32 offset:84
-; GFX11-NEXT: scratch_store_b32 off, v140, s32 offset:80
-; GFX11-NEXT: scratch_store_b32 off, v141, s32 offset:76
-; GFX11-NEXT: scratch_store_b32 off, v142, s32 offset:72
-; GFX11-NEXT: scratch_store_b32 off, v143, s32 offset:68
-; GFX11-NEXT: scratch_store_b32 off, v152, s32 offset:64
-; GFX11-NEXT: scratch_store_b32 off, v153, s32 offset:60
-; GFX11-NEXT: scratch_store_b32 off, v154, s32 offset:56
-; GFX11-NEXT: scratch_store_b32 off, v155, s32 offset:52
-; GFX11-NEXT: scratch_store_b32 off, v156, s32 offset:48
-; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:44
-; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:40
-; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:36
-; GFX11-NEXT: s_clause 0x8
-; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:32
-; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:28
-; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:24
-; GFX11-NEXT: scratch_store_b32 off, v171, s32 offset:20
-; GFX11-NEXT: scratch_store_b32 off, v172, s32 offset:16
-; GFX11-NEXT: scratch_store_b32 off, v173, s32 offset:12
-; GFX11-NEXT: scratch_store_b32 off, v174, s32 offset:8
-; GFX11-NEXT: scratch_store_b32 off, v175, s32 offset:4
-; GFX11-NEXT: scratch_store_b32 off, v184, s32
-; GFX11-NEXT: v_dual_mov_b32 v178, v13 :: v_dual_mov_b32 v179, v12
-; GFX11-NEXT: v_dual_mov_b32 v180, v11 :: v_dual_mov_b32 v181, v9
-; GFX11-NEXT: v_dual_mov_b32 v182, v10 :: v_dual_mov_b32 v169, v7
-; GFX11-NEXT: v_dual_mov_b32 v170, v8 :: v_dual_mov_b32 v177, v3
-; GFX11-NEXT: v_dual_mov_b32 v176, v6 :: v_dual_mov_b32 v171, v4
-; GFX11-NEXT: v_dual_mov_b32 v174, v5 :: v_dual_mov_b32 v173, v0
-; GFX11-NEXT: v_dual_mov_b32 v184, v2 :: v_dual_mov_b32 v175, v1
-; GFX11-NEXT: v_dual_mov_b32 v183, s28 :: v_dual_mov_b32 v172, s29
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_dual_mov_b32 v32, s0 :: v_dual_mov_b32 v37, s2
-; GFX11-NEXT: v_dual_mov_b32 v34, s1 :: v_dual_mov_b32 v41, s3
-; GFX11-NEXT: v_dual_mov_b32 v46, s16 :: v_dual_mov_b32 v59, s18
-; GFX11-NEXT: v_dual_mov_b32 v52, s17 :: v_dual_mov_b32 v67, s19
-; GFX11-NEXT: v_dual_mov_b32 v76, s20 :: v_dual_mov_b32 v97, s22
-; GFX11-NEXT: v_dual_mov_b32 v86, s21 :: v_dual_mov_b32 v109, s23
-; GFX11-NEXT: v_dual_mov_b32 v122, s24 :: v_dual_mov_b32 v151, s26
-; GFX11-NEXT: v_dual_mov_b32 v136, s25 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB43_3
-; GFX11-NEXT: .LBB43_2: ; %cmp.true
-; GFX11-NEXT: s_and_b32 s5, s27, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s4, s27, 16
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s6, s26, 16
-; GFX11-NEXT: s_and_b32 s4, s26, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v1
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_bfe_u32 v9, v3, 16, 1
-; GFX11-NEXT: s_lshl_b32 s7, s25, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_and_b32 s5, s25, 0xffff0000
-; GFX11-NEXT: s_and_b32 s4, s24, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s5
-; GFX11-NEXT: v_and_b32_e32 v51, 0xffff0000, v183
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v9, v3
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_bfe_u32 v10, v6, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v7, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s7
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_bfe_u32 v3, v8, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v3, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v10, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v7, v9, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: s_lshl_b32 s4, s24, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v6
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
-; GFX11-NEXT: s_and_b32 s4, s23, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_dual_cndmask_b32 v6, v1, v7 :: v_dual_and_b32 v1, 0xffff, v2
-; GFX11-NEXT: v_bfe_u32 v7, v9, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v7, v9
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s23, 16
-; GFX11-NEXT: v_lshl_or_b32 v151, v0, 16, v1
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_bfe_u32 v11, v7, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v4, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_and_b32 s4, s22, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v9, v12, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v11, v7
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v6, v10, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s22, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v5
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s4
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v9, v12
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v7
-; GFX11-NEXT: v_bfe_u32 v14, v10, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
-; GFX11-NEXT: s_and_b32 s4, s21, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v6, v9, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v9, v11, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v14, v10
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v7
-; GFX11-NEXT: v_dual_cndmask_b32 v8, v8, v13 :: v_dual_add_nc_u32 v7, v9, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v12
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v10
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_lshl_b32 s4, s21, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v11
-; GFX11-NEXT: v_add_f32_e64 v16, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v9, v12, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v10, v13, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX11-NEXT: v_bfe_u32 v12, v16, 16, 1
-; GFX11-NEXT: s_and_b32 s4, s20, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v11, v7, v14 :: v_dual_add_nc_u32 v10, v10, v13
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v9
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v12, v16
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: s_lshl_b32 s4, s20, 16
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
-; GFX11-NEXT: v_bfe_u32 v18, v12, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v10, v14, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-NEXT: s_and_b32 s4, s19, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v18, v12
-; GFX11-NEXT: v_bfe_u32 v16, v19, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v9
-; GFX11-NEXT: v_cndmask_b32_e32 v11, v11, v17, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s19, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v16, v19
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v12
-; GFX11-NEXT: v_add_f32_e64 v18, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v21, v17, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
-; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v19
-; GFX11-NEXT: s_and_b32 s4, s18, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v13, v16, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v16, v18, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v19, v21, v17
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v13
-; GFX11-NEXT: v_dual_cndmask_b32 v14, v14, v20 :: v_dual_add_nc_u32 v13, v16, v18
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v19
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v17
-; GFX11-NEXT: v_add_f32_e64 v20, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: s_lshl_b32 s4, s18, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
-; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v18
-; GFX11-NEXT: v_add_f32_e64 v22, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v16, v16, v19, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v17, v20, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v14
-; GFX11-NEXT: v_bfe_u32 v19, v22, 16, 1
-; GFX11-NEXT: s_and_b32 s4, s17, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v17, v17, v20
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v18, v13, v21 :: v_dual_and_b32 v13, 0xffff, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v16
-; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v18, v19, v22
-; GFX11-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
-; GFX11-NEXT: s_lshl_b32 s4, s17, 16
-; GFX11-NEXT: v_or_b32_e32 v23, 0x400000, v22
-; GFX11-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
-; GFX11-NEXT: v_bfe_u32 v24, v19, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v25, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v17, v21, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
-; GFX11-NEXT: s_and_b32 s4, s16, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v21, v24, v19
-; GFX11-NEXT: v_bfe_u32 v22, v25, 16, 1
-; GFX11-NEXT: v_dual_cndmask_b32 v18, v18, v23 :: v_dual_and_b32 v17, 0xffff, v16
-; GFX11-NEXT: v_add_f32_e64 v23, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s16, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v21, v22, v25
-; GFX11-NEXT: v_or_b32_e32 v22, 0x400000, v19
-; GFX11-NEXT: v_add_f32_e64 v24, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v27, v23, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v21
-; GFX11-NEXT: v_or_b32_e32 v26, 0x400000, v25
-; GFX11-NEXT: s_and_b32 s4, s3, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v20, v22, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v22, v24, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
-; GFX11-NEXT: v_add_nc_u32_e32 v25, v27, v23
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v20
-; GFX11-NEXT: v_dual_cndmask_b32 v21, v21, v26 :: v_dual_add_nc_u32 v20, v22, v24
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v22, 0x7fff, v25
-; GFX11-NEXT: v_or_b32_e32 v25, 0x400000, v23
-; GFX11-NEXT: v_add_f32_e64 v26, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
-; GFX11-NEXT: v_or_b32_e32 v27, 0x400000, v24
-; GFX11-NEXT: v_add_f32_e64 v28, 0x40c00000, s3
-; GFX11-NEXT: v_cndmask_b32_e32 v22, v22, v25, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v23, v26, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v21
-; GFX11-NEXT: v_bfe_u32 v25, v28, 16, 1
-; GFX11-NEXT: s_and_b32 s3, s2, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v24, v20, v27 :: v_dual_add_nc_u32 v23, v23, v26
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v22
-; GFX11-NEXT: v_or_b32_e32 v27, 0x400000, v26
-; GFX11-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v24, v25, v28
-; GFX11-NEXT: v_add_f32_e64 v25, 0x40c00000, s3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_or_b32_e32 v29, 0x400000, v28
-; GFX11-NEXT: v_add_nc_u32_e32 v24, 0x7fff, v24
-; GFX11-NEXT: v_bfe_u32 v30, v25, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v31, 0x40c00000, s2
-; GFX11-NEXT: v_cndmask_b32_e32 v26, v23, v27, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
-; GFX11-NEXT: s_and_b32 s2, s1, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v27, v30, v25
-; GFX11-NEXT: v_bfe_u32 v28, v31, 16, 1
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v24, v24, v29, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v29, 0x40c00000, s2
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v26
-; GFX11-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v27
-; GFX11-NEXT: v_add_nc_u32_e32 v27, v28, v31
-; GFX11-NEXT: v_or_b32_e32 v28, 0x400000, v25
-; GFX11-NEXT: v_add_f32_e64 v30, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v33, v29, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v27, 0x7fff, v27
-; GFX11-NEXT: v_or_b32_e32 v32, 0x400000, v31
-; GFX11-NEXT: s_and_b32 s1, s0, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v26, v26, v28, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v28, v30, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v33, v29
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v26
-; GFX11-NEXT: v_dual_cndmask_b32 v27, v27, v32 :: v_dual_add_nc_u32 v26, v28, v30
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v31
-; GFX11-NEXT: v_or_b32_e32 v31, 0x400000, v29
-; GFX11-NEXT: v_add_f32_e64 v32, 0x40c00000, s1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v26
-; GFX11-NEXT: v_or_b32_e32 v33, 0x400000, v30
-; GFX11-NEXT: v_add_f32_e64 v34, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v28, v28, v31, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v29, v32, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
-; GFX11-NEXT: v_or_b32_e32 v35, 0x400000, v32
-; GFX11-NEXT: v_bfe_u32 v31, v34, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v34
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v27
-; GFX11-NEXT: v_cndmask_b32_e32 v30, v26, v33, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v28
-; GFX11-NEXT: v_add_nc_u32_e32 v28, v29, v32
-; GFX11-NEXT: v_lshlrev_b32_e32 v33, 16, v178
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v30
-; GFX11-NEXT: v_add_nc_u32_e32 v30, v31, v34
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff0000, v178
-; GFX11-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v28
-; GFX11-NEXT: v_add_f32_e32 v33, 0x40c00000, v33
-; GFX11-NEXT: v_lshl_or_b32 v109, v5, 16, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v30, 0x7fff, v30
-; GFX11-NEXT: v_add_f32_e32 v31, 0x40c00000, v31
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v28, v35, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v37, v33, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v29
-; GFX11-NEXT: v_bfe_u32 v35, v31, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v32, v37, v33
-; GFX11-NEXT: v_cndmask_b32_e32 v30, v30, v36, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v179
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v35, v31
-; GFX11-NEXT: v_or_b32_e32 v37, 0x400000, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v179
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_or_b32_e32 v38, 0x400000, v31
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v32, v37, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v180
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v34, v38, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v34, v36, 16, 1
-; GFX11-NEXT: v_bfe_u32 v33, v35, 16, 1
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v180
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v33, v33, v35
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-NEXT: v_bfe_u32 v36, v37, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v30
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_bfe_u32 v35, v38, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v178, v31, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v36, v37
-; GFX11-NEXT: v_dual_cndmask_b32 v33, v33, v48 :: v_dual_lshlrev_b32 v36, 16, v182
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v33, v35, v38
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v182
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
-; GFX11-NEXT: v_lshl_or_b32 v179, v32, 16, v34
-; GFX11-NEXT: v_and_b32_e32 v30, 0xffff, v30
-; GFX11-NEXT: v_lshl_or_b32 v136, v2, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v33, v33, v39, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v38, v35, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_bfe_u32 v37, v36, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: v_dual_cndmask_b32 v31, v31, v48 :: v_dual_add_nc_u32 v38, v38, v35
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v32, v37, v36
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v181
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v181
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v32, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff, v33
-; GFX11-NEXT: v_bfe_u32 v35, v37, 16, 1
-; GFX11-NEXT: v_bfe_u32 v36, v38, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v180, v31, 16, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v33, v35, v37
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v170
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v36, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_lshlrev_b32 v36, 16, v170
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v37
-; GFX11-NEXT: v_bfe_u32 v38, v35, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v39, v36, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_lshl_or_b32 v182, v31, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v37, v38, v35
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v39, v36
-; GFX11-NEXT: v_cndmask_b32_e32 v33, v33, v48, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v38, 0x400000, v36
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v33
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v37
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v169
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v31, v38, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_lshlrev_b32_e32 v39, 16, v169
-; GFX11-NEXT: v_lshl_or_b32 v181, v32, 16, v33
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
-; GFX11-NEXT: v_and_b32_e32 v38, 0xffff0000, v176
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v39
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_bfe_u32 v35, v37, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v37
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v34
-; GFX11-NEXT: v_bfe_u32 v32, v36, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v35, v37
-; GFX11-NEXT: v_lshlrev_b32_e32 v35, 16, v176
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v32, v32, v36
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
-; GFX11-NEXT: v_bfe_u32 v37, v38, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v49, v35, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-NEXT: v_lshl_or_b32 v170, v33, 16, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v49, v35
-; GFX11-NEXT: v_dual_cndmask_b32 v32, v32, v48 :: v_dual_add_nc_u32 v33, v37, v38
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v174
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v36
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v35
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_dual_add_f32 v35, 0x40c00000, v37 :: v_dual_cndmask_b32 v34, v34, v36
-; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v174
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v37, v35, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_dual_add_f32 v36, 0x40c00000, v36 :: v_dual_cndmask_b32 v33, v33, v39
-; GFX11-NEXT: v_lshl_or_b32 v169, v31, 16, v32
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v37, v37, v35
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v31, v36, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff0000, v171
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v177
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v31, v36
-; GFX11-NEXT: v_lshl_or_b32 v176, v33, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v37
-; GFX11-NEXT: v_or_b32_e32 v34, 0x400000, v35
-; GFX11-NEXT: v_dual_add_f32 v32, 0x40c00000, v32 :: v_dual_lshlrev_b32 v37, 16, v171
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v33, v33, v34, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v34, 0x400000, v36
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v37
-; GFX11-NEXT: v_bfe_u32 v37, v32, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v32
-; GFX11-NEXT: v_bfe_u32 v50, v38, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_dual_cndmask_b32 v31, v31, v34 :: v_dual_add_nc_u32 v36, v37, v32
-; GFX11-NEXT: v_bfe_u32 v34, v35, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v177
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v35
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v36, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_bfe_u32 v49, v37, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v50, v38
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v50, 16, v184
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v34, v34, v48 :: v_dual_add_nc_u32 v35, v49, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_and_b32_e32 v48, 0xffff0000, v184
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v35, 0x7fff, v35
-; GFX11-NEXT: v_or_b32_e32 v49, 0x400000, v37
-; GFX11-NEXT: v_cndmask_b32_e32 v36, v36, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v50
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_dual_add_f32 v38, 0x40c00000, v48 :: v_dual_cndmask_b32 v35, v35, v49
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v36
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: v_bfe_u32 v48, v37, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v39, v38, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v35
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v36
-; GFX11-NEXT: v_lshl_or_b32 v174, v33, 16, v31
-; GFX11-NEXT: v_lshl_or_b32 v171, v32, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v48, v37
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff0000, v175
-; GFX11-NEXT: v_lshlrev_b32_e32 v34, 16, v175
-; GFX11-NEXT: v_add_nc_u32_e32 v39, v39, v38
-; GFX11-NEXT: v_lshl_or_b32 v177, v35, 16, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_or_b32_e32 v35, 0x400000, v37
-; GFX11-NEXT: v_dual_add_f32 v33, 0x40c00000, v33 :: v_dual_add_f32 v34, 0x40c00000, v34
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v39
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v37, v33, 16, 1
-; GFX11-NEXT: v_bfe_u32 v39, v34, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v31, v35, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v173
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v48, 16, v173
-; GFX11-NEXT: v_or_b32_e32 v49, 0x400000, v33
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_cndmask_b32 v32, v32, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v37, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v37, v39, v34
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v38, v35, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v37
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v38, v38, v35
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_lshl_or_b32 v122, v3, 16, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v37, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v38
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v48
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v35
-; GFX11-NEXT: v_dual_cndmask_b32 v33, v36, v49 :: v_dual_lshlrev_b32 v48, 16, v183
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v36, v38, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v49, 0x400000, v38
-; GFX11-NEXT: v_add_f32_e32 v48, 0x40c00000, v48
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_cndmask_b32_e32 v35, v37, v39, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v172
-; GFX11-NEXT: v_lshlrev_b32_e32 v39, 16, v172
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v36, v38
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_or_b32_e32 v55, 0x400000, v48
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
-; GFX11-NEXT: v_add_f32_e32 v39, 0x40c00000, v39
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: v_bfe_u32 v50, v37, 16, 1
-; GFX11-NEXT: v_bfe_u32 v38, v39, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v36, v36, v49, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v54, 0x400000, v39
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39
-; GFX11-NEXT: v_dual_add_f32 v50, 0x40c00000, v51 :: v_dual_add_nc_u32 v49, v50, v37
-; GFX11-NEXT: v_bfe_u32 v51, v48, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v38, v38, v39
-; GFX11-NEXT: v_or_b32_e32 v53, 0x400000, v37
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v49, 0x7fff, v49
-; GFX11-NEXT: v_bfe_u32 v52, v50, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v51, v51, v48
-; GFX11-NEXT: v_add_nc_u32_e32 v38, 0x7fff, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v52, v52, v50
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v38, v38, v54 :: v_dual_add_nc_u32 v51, 0x7fff, v51
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v39, 0x7fff, v52
-; GFX11-NEXT: v_or_b32_e32 v52, 0x400000, v50
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v38
-; GFX11-NEXT: v_cndmask_b32_e32 v48, v51, v55, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_lshl_or_b32 v184, v32, 16, v31
-; GFX11-NEXT: v_lshl_or_b32 v175, v33, 16, v34
-; GFX11-NEXT: v_and_b32_e32 v38, 0xffff, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v48
-; GFX11-NEXT: v_cndmask_b32_e32 v37, v49, v53, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50
-; GFX11-NEXT: v_lshl_or_b32 v173, v35, 16, v36
-; GFX11-NEXT: v_lshl_or_b32 v97, v8, 16, v10
-; GFX11-NEXT: v_and_b32_e32 v48, 0xffff, v48
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v37
-; GFX11-NEXT: v_cndmask_b32_e32 v39, v39, v52, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v86, v9, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v76, v11, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v67, v14, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v172, v37, 16, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v39
-; GFX11-NEXT: v_lshl_or_b32 v59, v16, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v52, v18, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v46, v21, 16, v23
-; GFX11-NEXT: v_lshl_or_b32 v41, v22, 16, v25
-; GFX11-NEXT: v_lshl_or_b32 v183, v39, 16, v48
-; GFX11-NEXT: v_lshl_or_b32 v37, v24, 16, v27
-; GFX11-NEXT: v_lshl_or_b32 v34, v26, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v32, v29, 16, v30
-; GFX11-NEXT: .LBB43_3: ; %end
-; GFX11-NEXT: v_dual_mov_b32 v3, v41 :: v_dual_mov_b32 v4, v46
-; GFX11-NEXT: v_dual_mov_b32 v6, v59 :: v_dual_mov_b32 v9, v86
-; GFX11-NEXT: v_dual_mov_b32 v7, v67 :: v_dual_mov_b32 v8, v76
-; GFX11-NEXT: v_dual_mov_b32 v10, v97 :: v_dual_mov_b32 v13, v136
-; GFX11-NEXT: v_dual_mov_b32 v11, v109 :: v_dual_mov_b32 v12, v122
-; GFX11-NEXT: v_dual_mov_b32 v14, v151 :: v_dual_mov_b32 v17, v172
-; GFX11-NEXT: v_dual_mov_b32 v18, v173 :: v_dual_mov_b32 v19, v175
-; GFX11-NEXT: v_dual_mov_b32 v20, v184 :: v_dual_mov_b32 v23, v174
-; GFX11-NEXT: v_dual_mov_b32 v22, v171 :: v_dual_mov_b32 v25, v169
-; GFX11-NEXT: v_dual_mov_b32 v26, v170 :: v_dual_mov_b32 v29, v180
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v184, off, s32
-; GFX11-NEXT: scratch_load_b32 v175, off, s32 offset:4
-; GFX11-NEXT: scratch_load_b32 v174, off, s32 offset:8
-; GFX11-NEXT: scratch_load_b32 v173, off, s32 offset:12
-; GFX11-NEXT: scratch_load_b32 v172, off, s32 offset:16
-; GFX11-NEXT: scratch_load_b32 v171, off, s32 offset:20
-; GFX11-NEXT: scratch_load_b32 v170, off, s32 offset:24
-; GFX11-NEXT: scratch_load_b32 v169, off, s32 offset:28
-; GFX11-NEXT: scratch_load_b32 v168, off, s32 offset:32
-; GFX11-NEXT: scratch_load_b32 v159, off, s32 offset:36
-; GFX11-NEXT: scratch_load_b32 v158, off, s32 offset:40
-; GFX11-NEXT: scratch_load_b32 v157, off, s32 offset:44
-; GFX11-NEXT: scratch_load_b32 v156, off, s32 offset:48
-; GFX11-NEXT: scratch_load_b32 v155, off, s32 offset:52
-; GFX11-NEXT: scratch_load_b32 v154, off, s32 offset:56
-; GFX11-NEXT: scratch_load_b32 v153, off, s32 offset:60
-; GFX11-NEXT: scratch_load_b32 v152, off, s32 offset:64
-; GFX11-NEXT: scratch_load_b32 v143, off, s32 offset:68
-; GFX11-NEXT: scratch_load_b32 v142, off, s32 offset:72
-; GFX11-NEXT: scratch_load_b32 v141, off, s32 offset:76
-; GFX11-NEXT: scratch_load_b32 v140, off, s32 offset:80
-; GFX11-NEXT: scratch_load_b32 v139, off, s32 offset:84
-; GFX11-NEXT: scratch_load_b32 v138, off, s32 offset:88
-; GFX11-NEXT: scratch_load_b32 v137, off, s32 offset:92
-; GFX11-NEXT: scratch_load_b32 v136, off, s32 offset:96
-; GFX11-NEXT: scratch_load_b32 v127, off, s32 offset:100
-; GFX11-NEXT: scratch_load_b32 v126, off, s32 offset:104
-; GFX11-NEXT: scratch_load_b32 v125, off, s32 offset:108
-; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:112
-; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:116
-; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:120
-; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:124
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:128
-; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:132
-; GFX11-NEXT: scratch_load_b32 v110, off, s32 offset:136
-; GFX11-NEXT: scratch_load_b32 v109, off, s32 offset:140
-; GFX11-NEXT: scratch_load_b32 v108, off, s32 offset:144
-; GFX11-NEXT: scratch_load_b32 v107, off, s32 offset:148
-; GFX11-NEXT: scratch_load_b32 v106, off, s32 offset:152
-; GFX11-NEXT: scratch_load_b32 v105, off, s32 offset:156
-; GFX11-NEXT: scratch_load_b32 v104, off, s32 offset:160
-; GFX11-NEXT: scratch_load_b32 v95, off, s32 offset:164
-; GFX11-NEXT: scratch_load_b32 v94, off, s32 offset:168
-; GFX11-NEXT: scratch_load_b32 v93, off, s32 offset:172
-; GFX11-NEXT: scratch_load_b32 v92, off, s32 offset:176
-; GFX11-NEXT: scratch_load_b32 v91, off, s32 offset:180
-; GFX11-NEXT: scratch_load_b32 v90, off, s32 offset:184
-; GFX11-NEXT: scratch_load_b32 v89, off, s32 offset:188
-; GFX11-NEXT: scratch_load_b32 v88, off, s32 offset:192
-; GFX11-NEXT: scratch_load_b32 v79, off, s32 offset:196
-; GFX11-NEXT: scratch_load_b32 v78, off, s32 offset:200
-; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:204
-; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:208
-; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:212
-; GFX11-NEXT: scratch_load_b32 v74, off, s32 offset:216
-; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:220
-; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:224
-; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:228
-; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:232
-; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:236
-; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:240
-; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:244
-; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:248
-; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:252
-; GFX11-NEXT: s_clause 0x8
-; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:256
-; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:260
-; GFX11-NEXT: scratch_load_b32 v46, off, s32 offset:264
-; GFX11-NEXT: scratch_load_b32 v45, off, s32 offset:268
-; GFX11-NEXT: scratch_load_b32 v44, off, s32 offset:272
-; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:276
-; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:280
-; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:284
-; GFX11-NEXT: scratch_load_b32 v40, off, s32 offset:288
-; GFX11-NEXT: v_dual_mov_b32 v0, v32 :: v_dual_mov_b32 v1, v34
-; GFX11-NEXT: v_dual_mov_b32 v2, v37 :: v_dual_mov_b32 v5, v52
-; GFX11-NEXT: v_dual_mov_b32 v16, v183 :: v_dual_mov_b32 v21, v177
-; GFX11-NEXT: v_dual_mov_b32 v24, v176 :: v_dual_mov_b32 v27, v181
-; GFX11-NEXT: v_mov_b32_e32 v28, v182
-; GFX11-NEXT: v_dual_mov_b32 v30, v179 :: v_dual_mov_b32 v31, v178
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB43_4:
-; GFX11-NEXT: ; implicit-def: $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
-; GFX11-NEXT: ; implicit-def: $vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73
-; GFX11-NEXT: ; implicit-def: $vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78
-; GFX11-NEXT: ; implicit-def: $vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84
-; GFX11-NEXT: ; implicit-def: $vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91
-; GFX11-NEXT: ; implicit-def: $vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99
-; GFX11-NEXT: ; implicit-def: $vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108
-; GFX11-NEXT: ; implicit-def: $vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118
-; GFX11-NEXT: ; implicit-def: $vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129
-; GFX11-NEXT: ; implicit-def: $vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141
-; GFX11-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
-; GFX11-NEXT: ; implicit-def: $vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168
-; GFX11-NEXT: s_branch .LBB43_2
+; GFX11-TRUE16-LABEL: bitcast_v64bf16_to_v32f32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:156
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:28
+; GFX11-TRUE16-NEXT: s_clause 0x6
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v167, v13 :: v_dual_mov_b32 v176, v12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v177, v11 :: v_dual_mov_b32 v178, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v179, v9 :: v_dual_mov_b32 v180, v8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v181, v7 :: v_dual_mov_b32 v182, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v183, v5 :: v_dual_mov_b32 v168, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v169, v3 :: v_dual_mov_b32 v170, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v171, v1 :: v_dual_mov_b32 v172, v0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v174, s28 :: v_dual_mov_b32 v173, s29
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB43_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v135, s0 :: v_dual_mov_b32 v134, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v132, s2 :: v_dual_mov_b32 v129, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v125, s16 :: v_dual_mov_b32 v120, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v114, s18 :: v_dual_mov_b32 v107, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v99, s20 :: v_dual_mov_b32 v90, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v80, s22 :: v_dual_mov_b32 v69, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v57, s24 :: v_dual_mov_b32 v44, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB43_3
+; GFX11-TRUE16-NEXT: .LBB43_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s27, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s27, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s26, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s26, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s25, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s25, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s24, 0xffff0000
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v8 :: v_dual_add_nc_u32 v7, v7, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s24, 16
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v2, v7, v2 :: v_dual_add_nc_u32 v7, v8, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v9, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v3, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v1.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 16, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v44.h, v4.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s23, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v57.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s23, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s22, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s22, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s21, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v80.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s21, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s20, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v90.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s20, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v99.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s19, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v107.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v114.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s17, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v120.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s16, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v125, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v125.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v132.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v135.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v167
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v167
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v167, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v167.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v176
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v176
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v176, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v176.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v177
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v177
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v177, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v177.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v178
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v178
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v178, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v178.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v179
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v179
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v179, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v179.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v180
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v180
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v180, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v180.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v181
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v181
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v181.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v182
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v182
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v182.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v183
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v183
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v183, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v183.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v168
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v168
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v168, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v168.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v169
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v169
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v169, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v169.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v170
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v170
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v170, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v170.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v171
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v171
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v171, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v171.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v172
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v172
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v172, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v172.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v173
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v173
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v173, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v173.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v174
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v174
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v174, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v174.h, v0.l
+; GFX11-TRUE16-NEXT: .LBB43_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, v125 :: v_dual_mov_b32 v5, v120
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v114 :: v_dual_mov_b32 v7, v107
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, v99 :: v_dual_mov_b32 v9, v90
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, v57 :: v_dual_mov_b32 v13, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v30 :: v_dual_mov_b32 v17, v173
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v174 :: v_dual_mov_b32 v19, v171
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v172 :: v_dual_mov_b32 v21, v169
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v170 :: v_dual_mov_b32 v23, v183
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v168 :: v_dual_mov_b32 v25, v181
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0x6
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:280
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v135 :: v_dual_mov_b32 v1, v134
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, v132 :: v_dual_mov_b32 v3, v129
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, v80 :: v_dual_mov_b32 v11, v69
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v182 :: v_dual_mov_b32 v27, v179
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v180 :: v_dual_mov_b32 v29, v177
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v178 :: v_dual_mov_b32 v31, v167
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v30, v176
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB43_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166
+; GFX11-TRUE16-NEXT: s_branch .LBB43_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v64bf16_to_v32f32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:288
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:284
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:280
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v43, s32 offset:276
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v44, s32 offset:272
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v45, s32 offset:268
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v46, s32 offset:264
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v47, s32 offset:260
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v56, s32 offset:256
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v57, s32 offset:252
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v58, s32 offset:248
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v59, s32 offset:244
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v60, s32 offset:240
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v61, s32 offset:236
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v62, s32 offset:232
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v63, s32 offset:228
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v72, s32 offset:224
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v73, s32 offset:220
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v74, s32 offset:216
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v75, s32 offset:212
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v76, s32 offset:208
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v77, s32 offset:204
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v78, s32 offset:200
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v79, s32 offset:196
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v88, s32 offset:192
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v89, s32 offset:188
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v90, s32 offset:184
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v91, s32 offset:180
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v92, s32 offset:176
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v93, s32 offset:172
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v94, s32 offset:168
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v95, s32 offset:164
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v104, s32 offset:160
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v105, s32 offset:156
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v106, s32 offset:152
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v107, s32 offset:148
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v108, s32 offset:144
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v109, s32 offset:140
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v110, s32 offset:136
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v111, s32 offset:132
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v120, s32 offset:128
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v121, s32 offset:124
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v122, s32 offset:120
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v123, s32 offset:116
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v124, s32 offset:112
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v125, s32 offset:108
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v126, s32 offset:104
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v127, s32 offset:100
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v136, s32 offset:96
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v137, s32 offset:92
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v138, s32 offset:88
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v139, s32 offset:84
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v140, s32 offset:80
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v141, s32 offset:76
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v142, s32 offset:72
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v143, s32 offset:68
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v152, s32 offset:64
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v153, s32 offset:60
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v154, s32 offset:56
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v155, s32 offset:52
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v156, s32 offset:48
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v157, s32 offset:44
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v158, s32 offset:40
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v159, s32 offset:36
+; GFX11-FAKE16-NEXT: s_clause 0x8
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v168, s32 offset:32
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v169, s32 offset:28
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v170, s32 offset:24
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v171, s32 offset:20
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v172, s32 offset:16
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v173, s32 offset:12
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v174, s32 offset:8
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v175, s32 offset:4
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v184, s32
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v178, v13 :: v_dual_mov_b32 v179, v12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v180, v11 :: v_dual_mov_b32 v181, v9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v182, v10 :: v_dual_mov_b32 v169, v7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v170, v8 :: v_dual_mov_b32 v177, v3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v176, v6 :: v_dual_mov_b32 v171, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v174, v5 :: v_dual_mov_b32 v173, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v184, v2 :: v_dual_mov_b32 v175, v1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v183, s28 :: v_dual_mov_b32 v172, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB43_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s0 :: v_dual_mov_b32 v37, s2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s1 :: v_dual_mov_b32 v41, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v46, s16 :: v_dual_mov_b32 v59, s18
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v52, s17 :: v_dual_mov_b32 v67, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v76, s20 :: v_dual_mov_b32 v97, s22
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v86, s21 :: v_dual_mov_b32 v109, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v122, s24 :: v_dual_mov_b32 v151, s26
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v136, s25 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB43_3
+; GFX11-FAKE16-NEXT: .LBB43_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s27, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s27, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s26, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s26, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v3, 16, 1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s25, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s25, 0xffff0000
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s24, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v51, 0xffff0000, v183
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v9, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v3, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v10, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v7, v9, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s24, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s23, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v6, v1, v7 :: v_dual_and_b32 v1, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v7, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s23, 16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v151, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s22, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v11, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v6, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s22, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v9, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v14, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s21, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v6, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v14, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v7
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v8, v8, v13 :: v_dual_add_nc_u32 v7, v9, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s21, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v9, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v16, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s20, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v11, v7, v14 :: v_dual_add_nc_u32 v10, v10, v13
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v12, v16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s20, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_bfe_u32 v18, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v10, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v18, v12
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v19, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v11, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s19, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v16, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v21, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v19
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v13, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v18, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, v21, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v13
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v14, v14, v20 :: v_dual_add_nc_u32 v13, v16, v18
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v17
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v20, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s18, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v18
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v22, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v16, v16, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v17, v20, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v14
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v22, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, v17, v20
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v18, v13, v21 :: v_dual_and_b32 v13, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, v19, v22
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s17, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v23, 0x400000, v22
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
+; GFX11-FAKE16-NEXT: v_bfe_u32 v24, v19, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v25, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v17, v21, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, v24, v19
+; GFX11-FAKE16-NEXT: v_bfe_u32 v22, v25, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v18, v18, v23 :: v_dual_and_b32 v17, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v23, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s16, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, v22, v25
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v22, 0x400000, v19
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v24, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v27, v23, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v21
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v26, 0x400000, v25
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v20, v22, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v22, v24, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v25, v27, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v20
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v21, v21, v26 :: v_dual_add_nc_u32 v20, v22, v24
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v22, 0x7fff, v25
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v23
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v26, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v24
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v28, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v22, v22, v25, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v23, v26, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v21
+; GFX11-FAKE16-NEXT: v_bfe_u32 v25, v28, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v24, v20, v27 :: v_dual_add_nc_u32 v23, v23, v26
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v22
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v26
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, v25, v28
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v25, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v29, 0x400000, v28
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, 0x7fff, v24
+; GFX11-FAKE16-NEXT: v_bfe_u32 v30, v25, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v31, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v26, v23, v27, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, v30, v25
+; GFX11-FAKE16-NEXT: v_bfe_u32 v28, v31, 16, 1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v24, v24, v29, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v29, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v26
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v27
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, v28, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v28, 0x400000, v25
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v30, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v33, v29, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, 0x7fff, v27
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v32, 0x400000, v31
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v26, v26, v28, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v28, v30, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v33, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v26
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v27, v27, v32 :: v_dual_add_nc_u32 v26, v28, v30
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v31, 0x400000, v29
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v32, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v26
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v33, 0x400000, v30
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v34, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v28, v28, v31, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v29, v32, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v35, 0x400000, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v31, v34, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v34
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v27
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v30, v26, v33, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v28
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, v29, v32
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v33, 16, v178
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v30
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v30, v31, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff0000, v178
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v28
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v33, 0x40c00000, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v109, v5, 16, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v30, 0x7fff, v30
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v31, 0x40c00000, v31
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v28, v35, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v33, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v29
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v31, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, v37, v33
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v30, v30, v36, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v36, 16, v179
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v35, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v37, 0x400000, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v179
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v38, 0x400000, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v32, v37, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v180
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v34, v38, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v34, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v33, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v38, 16, v180
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, v33, v35
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v36, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v30
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v178, v31, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v36, v37
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v33, v33, v48 :: v_dual_lshlrev_b32 v36, 16, v182
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, v35, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v182
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v179, v32, 16, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v30, 0xffff, v30
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v136, v2, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v33, v33, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v31, v31, v48 :: v_dual_add_nc_u32 v38, v38, v35
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, v37, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v181
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v38
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v38, 16, v181
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v32, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v33
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v36, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v180, v31, 16, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, v35, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v170
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v36, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_lshlrev_b32 v36, 16, v170
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v39, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v182, v31, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, v38, v35
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v39, v36
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v33, v33, v48, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v38, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v169
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v31, v38, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v39, 16, v169
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v181, v32, 16, v33
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff0000, v176
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v39
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v32, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v35, v37
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v35, 16, v176
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, v32, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v49, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v170, v33, 16, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v49, v35
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v32, v32, v48 :: v_dual_add_nc_u32 v33, v37, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v174
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v35, 0x40c00000, v37 :: v_dual_cndmask_b32 v34, v34, v36
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v36, 16, v174
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v36, 0x40c00000, v36 :: v_dual_cndmask_b32 v33, v33, v39
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v169, v31, 16, v32
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, v37, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v31, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v171
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v38, 16, v177
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v31, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v176, v33, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v37
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v34, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v32, 0x40c00000, v32 :: v_dual_lshlrev_b32 v37, 16, v171
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v33, v33, v34, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v34, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v32, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v50, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v31, v31, v34 :: v_dual_add_nc_u32 v36, v37, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v34, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v177
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v35
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v36, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v49, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v50, v38
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v50, 16, v184
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v34, v34, v48 :: v_dual_add_nc_u32 v35, v49, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff0000, v184
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v35, 0x7fff, v35
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v49, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v36, v36, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v50
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v38, 0x40c00000, v48 :: v_dual_cndmask_b32 v35, v35, v49
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v48, v37, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v39, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v174, v33, 16, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v171, v32, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v48, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff0000, v175
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v34, 16, v175
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v39, v39, v38
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v177, v35, 16, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v35, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v33, 0x40c00000, v33 :: v_dual_add_f32 v34, 0x40c00000, v34
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v39
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v33, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v39, v34, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v31, v35, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v173
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v48, 16, v173
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v49, 0x400000, v33
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_cndmask_b32 v32, v32, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v37, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, v39, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v37
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v38, v38, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v122, v3, 16, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v37, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v38
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v48
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v33, v36, v49 :: v_dual_lshlrev_b32 v48, 16, v183
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v36, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v49, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v48, 0x40c00000, v48
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v35, v37, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v172
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v39, 16, v172
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v36, v38
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v55, 0x400000, v48
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v39, 0x40c00000, v39
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v50, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v39, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v36, v36, v49, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v54, 0x400000, v39
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v50, 0x40c00000, v51 :: v_dual_add_nc_u32 v49, v50, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v51, v48, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v38, v38, v39
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v53, 0x400000, v37
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v49, 0x7fff, v49
+; GFX11-FAKE16-NEXT: v_bfe_u32 v52, v50, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v51, v51, v48
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v38, 0x7fff, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v52, v52, v50
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v38, v38, v54 :: v_dual_add_nc_u32 v51, 0x7fff, v51
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v39, 0x7fff, v52
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v52, 0x400000, v50
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v38
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v48, v51, v55, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v184, v32, 16, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v175, v33, 16, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v48
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v37, v49, v53, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v173, v35, 16, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v97, v8, 16, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v48
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v37
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v39, v39, v52, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v86, v9, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v76, v11, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v67, v14, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v172, v37, 16, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v39
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v59, v16, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v52, v18, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v46, v21, 16, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v41, v22, 16, v25
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v183, v39, 16, v48
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v37, v24, 16, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v34, v26, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v29, 16, v30
+; GFX11-FAKE16-NEXT: .LBB43_3: ; %end
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v41 :: v_dual_mov_b32 v4, v46
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v59 :: v_dual_mov_b32 v9, v86
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, v67 :: v_dual_mov_b32 v8, v76
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, v97 :: v_dual_mov_b32 v13, v136
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, v109 :: v_dual_mov_b32 v12, v122
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, v151 :: v_dual_mov_b32 v17, v172
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, v173 :: v_dual_mov_b32 v19, v175
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, v184 :: v_dual_mov_b32 v23, v174
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, v171 :: v_dual_mov_b32 v25, v169
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, v170 :: v_dual_mov_b32 v29, v180
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_load_b32 v184, off, s32
+; GFX11-FAKE16-NEXT: scratch_load_b32 v175, off, s32 offset:4
+; GFX11-FAKE16-NEXT: scratch_load_b32 v174, off, s32 offset:8
+; GFX11-FAKE16-NEXT: scratch_load_b32 v173, off, s32 offset:12
+; GFX11-FAKE16-NEXT: scratch_load_b32 v172, off, s32 offset:16
+; GFX11-FAKE16-NEXT: scratch_load_b32 v171, off, s32 offset:20
+; GFX11-FAKE16-NEXT: scratch_load_b32 v170, off, s32 offset:24
+; GFX11-FAKE16-NEXT: scratch_load_b32 v169, off, s32 offset:28
+; GFX11-FAKE16-NEXT: scratch_load_b32 v168, off, s32 offset:32
+; GFX11-FAKE16-NEXT: scratch_load_b32 v159, off, s32 offset:36
+; GFX11-FAKE16-NEXT: scratch_load_b32 v158, off, s32 offset:40
+; GFX11-FAKE16-NEXT: scratch_load_b32 v157, off, s32 offset:44
+; GFX11-FAKE16-NEXT: scratch_load_b32 v156, off, s32 offset:48
+; GFX11-FAKE16-NEXT: scratch_load_b32 v155, off, s32 offset:52
+; GFX11-FAKE16-NEXT: scratch_load_b32 v154, off, s32 offset:56
+; GFX11-FAKE16-NEXT: scratch_load_b32 v153, off, s32 offset:60
+; GFX11-FAKE16-NEXT: scratch_load_b32 v152, off, s32 offset:64
+; GFX11-FAKE16-NEXT: scratch_load_b32 v143, off, s32 offset:68
+; GFX11-FAKE16-NEXT: scratch_load_b32 v142, off, s32 offset:72
+; GFX11-FAKE16-NEXT: scratch_load_b32 v141, off, s32 offset:76
+; GFX11-FAKE16-NEXT: scratch_load_b32 v140, off, s32 offset:80
+; GFX11-FAKE16-NEXT: scratch_load_b32 v139, off, s32 offset:84
+; GFX11-FAKE16-NEXT: scratch_load_b32 v138, off, s32 offset:88
+; GFX11-FAKE16-NEXT: scratch_load_b32 v137, off, s32 offset:92
+; GFX11-FAKE16-NEXT: scratch_load_b32 v136, off, s32 offset:96
+; GFX11-FAKE16-NEXT: scratch_load_b32 v127, off, s32 offset:100
+; GFX11-FAKE16-NEXT: scratch_load_b32 v126, off, s32 offset:104
+; GFX11-FAKE16-NEXT: scratch_load_b32 v125, off, s32 offset:108
+; GFX11-FAKE16-NEXT: scratch_load_b32 v124, off, s32 offset:112
+; GFX11-FAKE16-NEXT: scratch_load_b32 v123, off, s32 offset:116
+; GFX11-FAKE16-NEXT: scratch_load_b32 v122, off, s32 offset:120
+; GFX11-FAKE16-NEXT: scratch_load_b32 v121, off, s32 offset:124
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_load_b32 v120, off, s32 offset:128
+; GFX11-FAKE16-NEXT: scratch_load_b32 v111, off, s32 offset:132
+; GFX11-FAKE16-NEXT: scratch_load_b32 v110, off, s32 offset:136
+; GFX11-FAKE16-NEXT: scratch_load_b32 v109, off, s32 offset:140
+; GFX11-FAKE16-NEXT: scratch_load_b32 v108, off, s32 offset:144
+; GFX11-FAKE16-NEXT: scratch_load_b32 v107, off, s32 offset:148
+; GFX11-FAKE16-NEXT: scratch_load_b32 v106, off, s32 offset:152
+; GFX11-FAKE16-NEXT: scratch_load_b32 v105, off, s32 offset:156
+; GFX11-FAKE16-NEXT: scratch_load_b32 v104, off, s32 offset:160
+; GFX11-FAKE16-NEXT: scratch_load_b32 v95, off, s32 offset:164
+; GFX11-FAKE16-NEXT: scratch_load_b32 v94, off, s32 offset:168
+; GFX11-FAKE16-NEXT: scratch_load_b32 v93, off, s32 offset:172
+; GFX11-FAKE16-NEXT: scratch_load_b32 v92, off, s32 offset:176
+; GFX11-FAKE16-NEXT: scratch_load_b32 v91, off, s32 offset:180
+; GFX11-FAKE16-NEXT: scratch_load_b32 v90, off, s32 offset:184
+; GFX11-FAKE16-NEXT: scratch_load_b32 v89, off, s32 offset:188
+; GFX11-FAKE16-NEXT: scratch_load_b32 v88, off, s32 offset:192
+; GFX11-FAKE16-NEXT: scratch_load_b32 v79, off, s32 offset:196
+; GFX11-FAKE16-NEXT: scratch_load_b32 v78, off, s32 offset:200
+; GFX11-FAKE16-NEXT: scratch_load_b32 v77, off, s32 offset:204
+; GFX11-FAKE16-NEXT: scratch_load_b32 v76, off, s32 offset:208
+; GFX11-FAKE16-NEXT: scratch_load_b32 v75, off, s32 offset:212
+; GFX11-FAKE16-NEXT: scratch_load_b32 v74, off, s32 offset:216
+; GFX11-FAKE16-NEXT: scratch_load_b32 v73, off, s32 offset:220
+; GFX11-FAKE16-NEXT: scratch_load_b32 v72, off, s32 offset:224
+; GFX11-FAKE16-NEXT: scratch_load_b32 v63, off, s32 offset:228
+; GFX11-FAKE16-NEXT: scratch_load_b32 v62, off, s32 offset:232
+; GFX11-FAKE16-NEXT: scratch_load_b32 v61, off, s32 offset:236
+; GFX11-FAKE16-NEXT: scratch_load_b32 v60, off, s32 offset:240
+; GFX11-FAKE16-NEXT: scratch_load_b32 v59, off, s32 offset:244
+; GFX11-FAKE16-NEXT: scratch_load_b32 v58, off, s32 offset:248
+; GFX11-FAKE16-NEXT: scratch_load_b32 v57, off, s32 offset:252
+; GFX11-FAKE16-NEXT: s_clause 0x8
+; GFX11-FAKE16-NEXT: scratch_load_b32 v56, off, s32 offset:256
+; GFX11-FAKE16-NEXT: scratch_load_b32 v47, off, s32 offset:260
+; GFX11-FAKE16-NEXT: scratch_load_b32 v46, off, s32 offset:264
+; GFX11-FAKE16-NEXT: scratch_load_b32 v45, off, s32 offset:268
+; GFX11-FAKE16-NEXT: scratch_load_b32 v44, off, s32 offset:272
+; GFX11-FAKE16-NEXT: scratch_load_b32 v43, off, s32 offset:276
+; GFX11-FAKE16-NEXT: scratch_load_b32 v42, off, s32 offset:280
+; GFX11-FAKE16-NEXT: scratch_load_b32 v41, off, s32 offset:284
+; GFX11-FAKE16-NEXT: scratch_load_b32 v40, off, s32 offset:288
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v32 :: v_dual_mov_b32 v1, v34
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v37 :: v_dual_mov_b32 v5, v52
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, v183 :: v_dual_mov_b32 v21, v177
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, v176 :: v_dual_mov_b32 v27, v181
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v28, v182
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, v179 :: v_dual_mov_b32 v31, v178
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB43_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168
+; GFX11-FAKE16-NEXT: s_branch .LBB43_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -101184,870 +103132,1844 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: .LBB63_4:
; GFX9-NEXT: s_branch .LBB63_2
;
-; GFX11-LABEL: bitcast_v64bf16_to_v16i64_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:288
-; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:284
-; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:280
-; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:276
-; GFX11-NEXT: scratch_store_b32 off, v44, s32 offset:272
-; GFX11-NEXT: scratch_store_b32 off, v45, s32 offset:268
-; GFX11-NEXT: scratch_store_b32 off, v46, s32 offset:264
-; GFX11-NEXT: scratch_store_b32 off, v47, s32 offset:260
-; GFX11-NEXT: scratch_store_b32 off, v56, s32 offset:256
-; GFX11-NEXT: scratch_store_b32 off, v57, s32 offset:252
-; GFX11-NEXT: scratch_store_b32 off, v58, s32 offset:248
-; GFX11-NEXT: scratch_store_b32 off, v59, s32 offset:244
-; GFX11-NEXT: scratch_store_b32 off, v60, s32 offset:240
-; GFX11-NEXT: scratch_store_b32 off, v61, s32 offset:236
-; GFX11-NEXT: scratch_store_b32 off, v62, s32 offset:232
-; GFX11-NEXT: scratch_store_b32 off, v63, s32 offset:228
-; GFX11-NEXT: scratch_store_b32 off, v72, s32 offset:224
-; GFX11-NEXT: scratch_store_b32 off, v73, s32 offset:220
-; GFX11-NEXT: scratch_store_b32 off, v74, s32 offset:216
-; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:212
-; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:208
-; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:204
-; GFX11-NEXT: scratch_store_b32 off, v78, s32 offset:200
-; GFX11-NEXT: scratch_store_b32 off, v79, s32 offset:196
-; GFX11-NEXT: scratch_store_b32 off, v88, s32 offset:192
-; GFX11-NEXT: scratch_store_b32 off, v89, s32 offset:188
-; GFX11-NEXT: scratch_store_b32 off, v90, s32 offset:184
-; GFX11-NEXT: scratch_store_b32 off, v91, s32 offset:180
-; GFX11-NEXT: scratch_store_b32 off, v92, s32 offset:176
-; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:172
-; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:168
-; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:164
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:160
-; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:156
-; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:152
-; GFX11-NEXT: scratch_store_b32 off, v107, s32 offset:148
-; GFX11-NEXT: scratch_store_b32 off, v108, s32 offset:144
-; GFX11-NEXT: scratch_store_b32 off, v109, s32 offset:140
-; GFX11-NEXT: scratch_store_b32 off, v110, s32 offset:136
-; GFX11-NEXT: scratch_store_b32 off, v111, s32 offset:132
-; GFX11-NEXT: scratch_store_b32 off, v120, s32 offset:128
-; GFX11-NEXT: scratch_store_b32 off, v121, s32 offset:124
-; GFX11-NEXT: scratch_store_b32 off, v122, s32 offset:120
-; GFX11-NEXT: scratch_store_b32 off, v123, s32 offset:116
-; GFX11-NEXT: scratch_store_b32 off, v124, s32 offset:112
-; GFX11-NEXT: scratch_store_b32 off, v125, s32 offset:108
-; GFX11-NEXT: scratch_store_b32 off, v126, s32 offset:104
-; GFX11-NEXT: scratch_store_b32 off, v127, s32 offset:100
-; GFX11-NEXT: scratch_store_b32 off, v136, s32 offset:96
-; GFX11-NEXT: scratch_store_b32 off, v137, s32 offset:92
-; GFX11-NEXT: scratch_store_b32 off, v138, s32 offset:88
-; GFX11-NEXT: scratch_store_b32 off, v139, s32 offset:84
-; GFX11-NEXT: scratch_store_b32 off, v140, s32 offset:80
-; GFX11-NEXT: scratch_store_b32 off, v141, s32 offset:76
-; GFX11-NEXT: scratch_store_b32 off, v142, s32 offset:72
-; GFX11-NEXT: scratch_store_b32 off, v143, s32 offset:68
-; GFX11-NEXT: scratch_store_b32 off, v152, s32 offset:64
-; GFX11-NEXT: scratch_store_b32 off, v153, s32 offset:60
-; GFX11-NEXT: scratch_store_b32 off, v154, s32 offset:56
-; GFX11-NEXT: scratch_store_b32 off, v155, s32 offset:52
-; GFX11-NEXT: scratch_store_b32 off, v156, s32 offset:48
-; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:44
-; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:40
-; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:36
-; GFX11-NEXT: s_clause 0x8
-; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:32
-; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:28
-; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:24
-; GFX11-NEXT: scratch_store_b32 off, v171, s32 offset:20
-; GFX11-NEXT: scratch_store_b32 off, v172, s32 offset:16
-; GFX11-NEXT: scratch_store_b32 off, v173, s32 offset:12
-; GFX11-NEXT: scratch_store_b32 off, v174, s32 offset:8
-; GFX11-NEXT: scratch_store_b32 off, v175, s32 offset:4
-; GFX11-NEXT: scratch_store_b32 off, v184, s32
-; GFX11-NEXT: v_dual_mov_b32 v178, v13 :: v_dual_mov_b32 v179, v12
-; GFX11-NEXT: v_dual_mov_b32 v180, v11 :: v_dual_mov_b32 v181, v9
-; GFX11-NEXT: v_dual_mov_b32 v182, v10 :: v_dual_mov_b32 v169, v7
-; GFX11-NEXT: v_dual_mov_b32 v170, v8 :: v_dual_mov_b32 v177, v3
-; GFX11-NEXT: v_dual_mov_b32 v176, v6 :: v_dual_mov_b32 v171, v4
-; GFX11-NEXT: v_dual_mov_b32 v174, v5 :: v_dual_mov_b32 v173, v0
-; GFX11-NEXT: v_dual_mov_b32 v184, v2 :: v_dual_mov_b32 v175, v1
-; GFX11-NEXT: v_dual_mov_b32 v183, s28 :: v_dual_mov_b32 v172, s29
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB63_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_dual_mov_b32 v32, s0 :: v_dual_mov_b32 v37, s2
-; GFX11-NEXT: v_dual_mov_b32 v34, s1 :: v_dual_mov_b32 v41, s3
-; GFX11-NEXT: v_dual_mov_b32 v46, s16 :: v_dual_mov_b32 v59, s18
-; GFX11-NEXT: v_dual_mov_b32 v52, s17 :: v_dual_mov_b32 v67, s19
-; GFX11-NEXT: v_dual_mov_b32 v76, s20 :: v_dual_mov_b32 v97, s22
-; GFX11-NEXT: v_dual_mov_b32 v86, s21 :: v_dual_mov_b32 v109, s23
-; GFX11-NEXT: v_dual_mov_b32 v122, s24 :: v_dual_mov_b32 v151, s26
-; GFX11-NEXT: v_dual_mov_b32 v136, s25 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB63_3
-; GFX11-NEXT: .LBB63_2: ; %cmp.true
-; GFX11-NEXT: s_and_b32 s5, s27, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s4, s27, 16
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s6, s26, 16
-; GFX11-NEXT: s_and_b32 s4, s26, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v1
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_bfe_u32 v9, v3, 16, 1
-; GFX11-NEXT: s_lshl_b32 s7, s25, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_and_b32 s5, s25, 0xffff0000
-; GFX11-NEXT: s_and_b32 s4, s24, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s5
-; GFX11-NEXT: v_and_b32_e32 v51, 0xffff0000, v183
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v9, v3
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_bfe_u32 v10, v6, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v7, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s7
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_bfe_u32 v3, v8, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v3, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v10, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v7, v9, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: s_lshl_b32 s4, s24, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v6
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
-; GFX11-NEXT: s_and_b32 s4, s23, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_dual_cndmask_b32 v6, v1, v7 :: v_dual_and_b32 v1, 0xffff, v2
-; GFX11-NEXT: v_bfe_u32 v7, v9, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v7, v9
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s23, 16
-; GFX11-NEXT: v_lshl_or_b32 v151, v0, 16, v1
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_bfe_u32 v11, v7, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v4, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_and_b32 s4, s22, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v9, v12, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v11, v7
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v6, v10, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s22, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v5
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s4
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v9, v12
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v7
-; GFX11-NEXT: v_bfe_u32 v14, v10, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
-; GFX11-NEXT: s_and_b32 s4, s21, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v6, v9, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v9, v11, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v14, v10
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v7
-; GFX11-NEXT: v_dual_cndmask_b32 v8, v8, v13 :: v_dual_add_nc_u32 v7, v9, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v12
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v10
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_lshl_b32 s4, s21, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v11
-; GFX11-NEXT: v_add_f32_e64 v16, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v9, v12, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v10, v13, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX11-NEXT: v_bfe_u32 v12, v16, 16, 1
-; GFX11-NEXT: s_and_b32 s4, s20, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v11, v7, v14 :: v_dual_add_nc_u32 v10, v10, v13
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v9
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v12, v16
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: s_lshl_b32 s4, s20, 16
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
-; GFX11-NEXT: v_bfe_u32 v18, v12, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v10, v14, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-NEXT: s_and_b32 s4, s19, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v18, v12
-; GFX11-NEXT: v_bfe_u32 v16, v19, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v9
-; GFX11-NEXT: v_cndmask_b32_e32 v11, v11, v17, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s19, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v16, v19
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v12
-; GFX11-NEXT: v_add_f32_e64 v18, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v21, v17, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
-; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v19
-; GFX11-NEXT: s_and_b32 s4, s18, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v13, v16, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v16, v18, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v19, v21, v17
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v13
-; GFX11-NEXT: v_dual_cndmask_b32 v14, v14, v20 :: v_dual_add_nc_u32 v13, v16, v18
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v19
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v17
-; GFX11-NEXT: v_add_f32_e64 v20, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: s_lshl_b32 s4, s18, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
-; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v18
-; GFX11-NEXT: v_add_f32_e64 v22, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v16, v16, v19, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v17, v20, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v14
-; GFX11-NEXT: v_bfe_u32 v19, v22, 16, 1
-; GFX11-NEXT: s_and_b32 s4, s17, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v17, v17, v20
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v18, v13, v21 :: v_dual_and_b32 v13, 0xffff, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v16
-; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v18, v19, v22
-; GFX11-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
-; GFX11-NEXT: s_lshl_b32 s4, s17, 16
-; GFX11-NEXT: v_or_b32_e32 v23, 0x400000, v22
-; GFX11-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
-; GFX11-NEXT: v_bfe_u32 v24, v19, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v25, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v17, v21, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
-; GFX11-NEXT: s_and_b32 s4, s16, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v21, v24, v19
-; GFX11-NEXT: v_bfe_u32 v22, v25, 16, 1
-; GFX11-NEXT: v_dual_cndmask_b32 v18, v18, v23 :: v_dual_and_b32 v17, 0xffff, v16
-; GFX11-NEXT: v_add_f32_e64 v23, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s16, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v21, v22, v25
-; GFX11-NEXT: v_or_b32_e32 v22, 0x400000, v19
-; GFX11-NEXT: v_add_f32_e64 v24, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v27, v23, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v21
-; GFX11-NEXT: v_or_b32_e32 v26, 0x400000, v25
-; GFX11-NEXT: s_and_b32 s4, s3, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v20, v22, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v22, v24, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
-; GFX11-NEXT: v_add_nc_u32_e32 v25, v27, v23
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v20
-; GFX11-NEXT: v_dual_cndmask_b32 v21, v21, v26 :: v_dual_add_nc_u32 v20, v22, v24
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v22, 0x7fff, v25
-; GFX11-NEXT: v_or_b32_e32 v25, 0x400000, v23
-; GFX11-NEXT: v_add_f32_e64 v26, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
-; GFX11-NEXT: v_or_b32_e32 v27, 0x400000, v24
-; GFX11-NEXT: v_add_f32_e64 v28, 0x40c00000, s3
-; GFX11-NEXT: v_cndmask_b32_e32 v22, v22, v25, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v23, v26, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v21
-; GFX11-NEXT: v_bfe_u32 v25, v28, 16, 1
-; GFX11-NEXT: s_and_b32 s3, s2, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v24, v20, v27 :: v_dual_add_nc_u32 v23, v23, v26
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v22
-; GFX11-NEXT: v_or_b32_e32 v27, 0x400000, v26
-; GFX11-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v24, v25, v28
-; GFX11-NEXT: v_add_f32_e64 v25, 0x40c00000, s3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_or_b32_e32 v29, 0x400000, v28
-; GFX11-NEXT: v_add_nc_u32_e32 v24, 0x7fff, v24
-; GFX11-NEXT: v_bfe_u32 v30, v25, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v31, 0x40c00000, s2
-; GFX11-NEXT: v_cndmask_b32_e32 v26, v23, v27, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
-; GFX11-NEXT: s_and_b32 s2, s1, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v27, v30, v25
-; GFX11-NEXT: v_bfe_u32 v28, v31, 16, 1
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v24, v24, v29, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v29, 0x40c00000, s2
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v26
-; GFX11-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v27
-; GFX11-NEXT: v_add_nc_u32_e32 v27, v28, v31
-; GFX11-NEXT: v_or_b32_e32 v28, 0x400000, v25
-; GFX11-NEXT: v_add_f32_e64 v30, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v33, v29, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v27, 0x7fff, v27
-; GFX11-NEXT: v_or_b32_e32 v32, 0x400000, v31
-; GFX11-NEXT: s_and_b32 s1, s0, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v26, v26, v28, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v28, v30, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v33, v29
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v26
-; GFX11-NEXT: v_dual_cndmask_b32 v27, v27, v32 :: v_dual_add_nc_u32 v26, v28, v30
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v31
-; GFX11-NEXT: v_or_b32_e32 v31, 0x400000, v29
-; GFX11-NEXT: v_add_f32_e64 v32, 0x40c00000, s1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v26
-; GFX11-NEXT: v_or_b32_e32 v33, 0x400000, v30
-; GFX11-NEXT: v_add_f32_e64 v34, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v28, v28, v31, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v29, v32, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
-; GFX11-NEXT: v_or_b32_e32 v35, 0x400000, v32
-; GFX11-NEXT: v_bfe_u32 v31, v34, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v34
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v27
-; GFX11-NEXT: v_cndmask_b32_e32 v30, v26, v33, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v28
-; GFX11-NEXT: v_add_nc_u32_e32 v28, v29, v32
-; GFX11-NEXT: v_lshlrev_b32_e32 v33, 16, v178
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v30
-; GFX11-NEXT: v_add_nc_u32_e32 v30, v31, v34
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff0000, v178
-; GFX11-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v28
-; GFX11-NEXT: v_add_f32_e32 v33, 0x40c00000, v33
-; GFX11-NEXT: v_lshl_or_b32 v109, v5, 16, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v30, 0x7fff, v30
-; GFX11-NEXT: v_add_f32_e32 v31, 0x40c00000, v31
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v28, v35, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v37, v33, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v29
-; GFX11-NEXT: v_bfe_u32 v35, v31, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v32, v37, v33
-; GFX11-NEXT: v_cndmask_b32_e32 v30, v30, v36, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v179
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v35, v31
-; GFX11-NEXT: v_or_b32_e32 v37, 0x400000, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v179
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_or_b32_e32 v38, 0x400000, v31
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v32, v37, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v180
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v34, v38, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v34, v36, 16, 1
-; GFX11-NEXT: v_bfe_u32 v33, v35, 16, 1
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v180
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v33, v33, v35
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-NEXT: v_bfe_u32 v36, v37, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v30
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_bfe_u32 v35, v38, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v178, v31, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v36, v37
-; GFX11-NEXT: v_dual_cndmask_b32 v33, v33, v48 :: v_dual_lshlrev_b32 v36, 16, v182
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v33, v35, v38
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v182
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
-; GFX11-NEXT: v_lshl_or_b32 v179, v32, 16, v34
-; GFX11-NEXT: v_and_b32_e32 v30, 0xffff, v30
-; GFX11-NEXT: v_lshl_or_b32 v136, v2, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v33, v33, v39, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v38, v35, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_bfe_u32 v37, v36, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: v_dual_cndmask_b32 v31, v31, v48 :: v_dual_add_nc_u32 v38, v38, v35
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v32, v37, v36
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v181
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v181
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v32, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff, v33
-; GFX11-NEXT: v_bfe_u32 v35, v37, 16, 1
-; GFX11-NEXT: v_bfe_u32 v36, v38, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v180, v31, 16, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v33, v35, v37
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v170
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v36, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_lshlrev_b32 v36, 16, v170
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v37
-; GFX11-NEXT: v_bfe_u32 v38, v35, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v39, v36, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_lshl_or_b32 v182, v31, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v37, v38, v35
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v39, v36
-; GFX11-NEXT: v_cndmask_b32_e32 v33, v33, v48, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v38, 0x400000, v36
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v33
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v37
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v169
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v31, v38, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_lshlrev_b32_e32 v39, 16, v169
-; GFX11-NEXT: v_lshl_or_b32 v181, v32, 16, v33
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
-; GFX11-NEXT: v_and_b32_e32 v38, 0xffff0000, v176
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v39
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_bfe_u32 v35, v37, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v37
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v34
-; GFX11-NEXT: v_bfe_u32 v32, v36, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v35, v37
-; GFX11-NEXT: v_lshlrev_b32_e32 v35, 16, v176
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v32, v32, v36
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
-; GFX11-NEXT: v_bfe_u32 v37, v38, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v49, v35, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-NEXT: v_lshl_or_b32 v170, v33, 16, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v49, v35
-; GFX11-NEXT: v_dual_cndmask_b32 v32, v32, v48 :: v_dual_add_nc_u32 v33, v37, v38
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v174
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v36
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v35
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_dual_add_f32 v35, 0x40c00000, v37 :: v_dual_cndmask_b32 v34, v34, v36
-; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v174
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v37, v35, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_dual_add_f32 v36, 0x40c00000, v36 :: v_dual_cndmask_b32 v33, v33, v39
-; GFX11-NEXT: v_lshl_or_b32 v169, v31, 16, v32
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v37, v37, v35
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v31, v36, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff0000, v171
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v177
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v31, v36
-; GFX11-NEXT: v_lshl_or_b32 v176, v33, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v37
-; GFX11-NEXT: v_or_b32_e32 v34, 0x400000, v35
-; GFX11-NEXT: v_dual_add_f32 v32, 0x40c00000, v32 :: v_dual_lshlrev_b32 v37, 16, v171
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v33, v33, v34, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v34, 0x400000, v36
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v37
-; GFX11-NEXT: v_bfe_u32 v37, v32, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v32
-; GFX11-NEXT: v_bfe_u32 v50, v38, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_dual_cndmask_b32 v31, v31, v34 :: v_dual_add_nc_u32 v36, v37, v32
-; GFX11-NEXT: v_bfe_u32 v34, v35, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v177
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v35
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v36, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_bfe_u32 v49, v37, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v50, v38
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v50, 16, v184
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v34, v34, v48 :: v_dual_add_nc_u32 v35, v49, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_and_b32_e32 v48, 0xffff0000, v184
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v35, 0x7fff, v35
-; GFX11-NEXT: v_or_b32_e32 v49, 0x400000, v37
-; GFX11-NEXT: v_cndmask_b32_e32 v36, v36, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v50
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_dual_add_f32 v38, 0x40c00000, v48 :: v_dual_cndmask_b32 v35, v35, v49
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v36
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: v_bfe_u32 v48, v37, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v39, v38, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v35
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v36
-; GFX11-NEXT: v_lshl_or_b32 v174, v33, 16, v31
-; GFX11-NEXT: v_lshl_or_b32 v171, v32, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v48, v37
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff0000, v175
-; GFX11-NEXT: v_lshlrev_b32_e32 v34, 16, v175
-; GFX11-NEXT: v_add_nc_u32_e32 v39, v39, v38
-; GFX11-NEXT: v_lshl_or_b32 v177, v35, 16, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_or_b32_e32 v35, 0x400000, v37
-; GFX11-NEXT: v_dual_add_f32 v33, 0x40c00000, v33 :: v_dual_add_f32 v34, 0x40c00000, v34
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v39
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v37, v33, 16, 1
-; GFX11-NEXT: v_bfe_u32 v39, v34, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v31, v35, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v173
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v48, 16, v173
-; GFX11-NEXT: v_or_b32_e32 v49, 0x400000, v33
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_cndmask_b32 v32, v32, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v37, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v37, v39, v34
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v38, v35, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v37
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v38, v38, v35
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_lshl_or_b32 v122, v3, 16, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v37, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v38
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v48
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v35
-; GFX11-NEXT: v_dual_cndmask_b32 v33, v36, v49 :: v_dual_lshlrev_b32 v48, 16, v183
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v36, v38, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v49, 0x400000, v38
-; GFX11-NEXT: v_add_f32_e32 v48, 0x40c00000, v48
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_cndmask_b32_e32 v35, v37, v39, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v172
-; GFX11-NEXT: v_lshlrev_b32_e32 v39, 16, v172
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v36, v38
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_or_b32_e32 v55, 0x400000, v48
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
-; GFX11-NEXT: v_add_f32_e32 v39, 0x40c00000, v39
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: v_bfe_u32 v50, v37, 16, 1
-; GFX11-NEXT: v_bfe_u32 v38, v39, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v36, v36, v49, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v54, 0x400000, v39
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39
-; GFX11-NEXT: v_dual_add_f32 v50, 0x40c00000, v51 :: v_dual_add_nc_u32 v49, v50, v37
-; GFX11-NEXT: v_bfe_u32 v51, v48, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v38, v38, v39
-; GFX11-NEXT: v_or_b32_e32 v53, 0x400000, v37
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v49, 0x7fff, v49
-; GFX11-NEXT: v_bfe_u32 v52, v50, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v51, v51, v48
-; GFX11-NEXT: v_add_nc_u32_e32 v38, 0x7fff, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v52, v52, v50
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v38, v38, v54 :: v_dual_add_nc_u32 v51, 0x7fff, v51
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v39, 0x7fff, v52
-; GFX11-NEXT: v_or_b32_e32 v52, 0x400000, v50
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v38
-; GFX11-NEXT: v_cndmask_b32_e32 v48, v51, v55, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_lshl_or_b32 v184, v32, 16, v31
-; GFX11-NEXT: v_lshl_or_b32 v175, v33, 16, v34
-; GFX11-NEXT: v_and_b32_e32 v38, 0xffff, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v48
-; GFX11-NEXT: v_cndmask_b32_e32 v37, v49, v53, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50
-; GFX11-NEXT: v_lshl_or_b32 v173, v35, 16, v36
-; GFX11-NEXT: v_lshl_or_b32 v97, v8, 16, v10
-; GFX11-NEXT: v_and_b32_e32 v48, 0xffff, v48
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v37
-; GFX11-NEXT: v_cndmask_b32_e32 v39, v39, v52, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v86, v9, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v76, v11, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v67, v14, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v172, v37, 16, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v39
-; GFX11-NEXT: v_lshl_or_b32 v59, v16, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v52, v18, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v46, v21, 16, v23
-; GFX11-NEXT: v_lshl_or_b32 v41, v22, 16, v25
-; GFX11-NEXT: v_lshl_or_b32 v183, v39, 16, v48
-; GFX11-NEXT: v_lshl_or_b32 v37, v24, 16, v27
-; GFX11-NEXT: v_lshl_or_b32 v34, v26, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v32, v29, 16, v30
-; GFX11-NEXT: .LBB63_3: ; %end
-; GFX11-NEXT: v_dual_mov_b32 v3, v41 :: v_dual_mov_b32 v4, v46
-; GFX11-NEXT: v_dual_mov_b32 v6, v59 :: v_dual_mov_b32 v9, v86
-; GFX11-NEXT: v_dual_mov_b32 v7, v67 :: v_dual_mov_b32 v8, v76
-; GFX11-NEXT: v_dual_mov_b32 v10, v97 :: v_dual_mov_b32 v13, v136
-; GFX11-NEXT: v_dual_mov_b32 v11, v109 :: v_dual_mov_b32 v12, v122
-; GFX11-NEXT: v_dual_mov_b32 v14, v151 :: v_dual_mov_b32 v17, v172
-; GFX11-NEXT: v_dual_mov_b32 v18, v173 :: v_dual_mov_b32 v19, v175
-; GFX11-NEXT: v_dual_mov_b32 v20, v184 :: v_dual_mov_b32 v23, v174
-; GFX11-NEXT: v_dual_mov_b32 v22, v171 :: v_dual_mov_b32 v25, v169
-; GFX11-NEXT: v_dual_mov_b32 v26, v170 :: v_dual_mov_b32 v29, v180
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v184, off, s32
-; GFX11-NEXT: scratch_load_b32 v175, off, s32 offset:4
-; GFX11-NEXT: scratch_load_b32 v174, off, s32 offset:8
-; GFX11-NEXT: scratch_load_b32 v173, off, s32 offset:12
-; GFX11-NEXT: scratch_load_b32 v172, off, s32 offset:16
-; GFX11-NEXT: scratch_load_b32 v171, off, s32 offset:20
-; GFX11-NEXT: scratch_load_b32 v170, off, s32 offset:24
-; GFX11-NEXT: scratch_load_b32 v169, off, s32 offset:28
-; GFX11-NEXT: scratch_load_b32 v168, off, s32 offset:32
-; GFX11-NEXT: scratch_load_b32 v159, off, s32 offset:36
-; GFX11-NEXT: scratch_load_b32 v158, off, s32 offset:40
-; GFX11-NEXT: scratch_load_b32 v157, off, s32 offset:44
-; GFX11-NEXT: scratch_load_b32 v156, off, s32 offset:48
-; GFX11-NEXT: scratch_load_b32 v155, off, s32 offset:52
-; GFX11-NEXT: scratch_load_b32 v154, off, s32 offset:56
-; GFX11-NEXT: scratch_load_b32 v153, off, s32 offset:60
-; GFX11-NEXT: scratch_load_b32 v152, off, s32 offset:64
-; GFX11-NEXT: scratch_load_b32 v143, off, s32 offset:68
-; GFX11-NEXT: scratch_load_b32 v142, off, s32 offset:72
-; GFX11-NEXT: scratch_load_b32 v141, off, s32 offset:76
-; GFX11-NEXT: scratch_load_b32 v140, off, s32 offset:80
-; GFX11-NEXT: scratch_load_b32 v139, off, s32 offset:84
-; GFX11-NEXT: scratch_load_b32 v138, off, s32 offset:88
-; GFX11-NEXT: scratch_load_b32 v137, off, s32 offset:92
-; GFX11-NEXT: scratch_load_b32 v136, off, s32 offset:96
-; GFX11-NEXT: scratch_load_b32 v127, off, s32 offset:100
-; GFX11-NEXT: scratch_load_b32 v126, off, s32 offset:104
-; GFX11-NEXT: scratch_load_b32 v125, off, s32 offset:108
-; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:112
-; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:116
-; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:120
-; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:124
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:128
-; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:132
-; GFX11-NEXT: scratch_load_b32 v110, off, s32 offset:136
-; GFX11-NEXT: scratch_load_b32 v109, off, s32 offset:140
-; GFX11-NEXT: scratch_load_b32 v108, off, s32 offset:144
-; GFX11-NEXT: scratch_load_b32 v107, off, s32 offset:148
-; GFX11-NEXT: scratch_load_b32 v106, off, s32 offset:152
-; GFX11-NEXT: scratch_load_b32 v105, off, s32 offset:156
-; GFX11-NEXT: scratch_load_b32 v104, off, s32 offset:160
-; GFX11-NEXT: scratch_load_b32 v95, off, s32 offset:164
-; GFX11-NEXT: scratch_load_b32 v94, off, s32 offset:168
-; GFX11-NEXT: scratch_load_b32 v93, off, s32 offset:172
-; GFX11-NEXT: scratch_load_b32 v92, off, s32 offset:176
-; GFX11-NEXT: scratch_load_b32 v91, off, s32 offset:180
-; GFX11-NEXT: scratch_load_b32 v90, off, s32 offset:184
-; GFX11-NEXT: scratch_load_b32 v89, off, s32 offset:188
-; GFX11-NEXT: scratch_load_b32 v88, off, s32 offset:192
-; GFX11-NEXT: scratch_load_b32 v79, off, s32 offset:196
-; GFX11-NEXT: scratch_load_b32 v78, off, s32 offset:200
-; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:204
-; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:208
-; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:212
-; GFX11-NEXT: scratch_load_b32 v74, off, s32 offset:216
-; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:220
-; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:224
-; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:228
-; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:232
-; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:236
-; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:240
-; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:244
-; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:248
-; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:252
-; GFX11-NEXT: s_clause 0x8
-; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:256
-; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:260
-; GFX11-NEXT: scratch_load_b32 v46, off, s32 offset:264
-; GFX11-NEXT: scratch_load_b32 v45, off, s32 offset:268
-; GFX11-NEXT: scratch_load_b32 v44, off, s32 offset:272
-; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:276
-; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:280
-; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:284
-; GFX11-NEXT: scratch_load_b32 v40, off, s32 offset:288
-; GFX11-NEXT: v_dual_mov_b32 v0, v32 :: v_dual_mov_b32 v1, v34
-; GFX11-NEXT: v_dual_mov_b32 v2, v37 :: v_dual_mov_b32 v5, v52
-; GFX11-NEXT: v_dual_mov_b32 v16, v183 :: v_dual_mov_b32 v21, v177
-; GFX11-NEXT: v_dual_mov_b32 v24, v176 :: v_dual_mov_b32 v27, v181
-; GFX11-NEXT: v_mov_b32_e32 v28, v182
-; GFX11-NEXT: v_dual_mov_b32 v30, v179 :: v_dual_mov_b32 v31, v178
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB63_4:
-; GFX11-NEXT: ; implicit-def: $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
-; GFX11-NEXT: ; implicit-def: $vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73
-; GFX11-NEXT: ; implicit-def: $vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78
-; GFX11-NEXT: ; implicit-def: $vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84
-; GFX11-NEXT: ; implicit-def: $vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91
-; GFX11-NEXT: ; implicit-def: $vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99
-; GFX11-NEXT: ; implicit-def: $vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108
-; GFX11-NEXT: ; implicit-def: $vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118
-; GFX11-NEXT: ; implicit-def: $vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129
-; GFX11-NEXT: ; implicit-def: $vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141
-; GFX11-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
-; GFX11-NEXT: ; implicit-def: $vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168
-; GFX11-NEXT: s_branch .LBB63_2
+; GFX11-TRUE16-LABEL: bitcast_v64bf16_to_v16i64_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:156
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:28
+; GFX11-TRUE16-NEXT: s_clause 0x6
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v167, v13 :: v_dual_mov_b32 v176, v12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v177, v11 :: v_dual_mov_b32 v178, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v179, v9 :: v_dual_mov_b32 v180, v8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v181, v7 :: v_dual_mov_b32 v182, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v183, v5 :: v_dual_mov_b32 v168, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v169, v3 :: v_dual_mov_b32 v170, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v171, v1 :: v_dual_mov_b32 v172, v0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v174, s28 :: v_dual_mov_b32 v173, s29
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB63_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v135, s0 :: v_dual_mov_b32 v134, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v132, s2 :: v_dual_mov_b32 v129, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v125, s16 :: v_dual_mov_b32 v120, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v114, s18 :: v_dual_mov_b32 v107, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v99, s20 :: v_dual_mov_b32 v90, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v80, s22 :: v_dual_mov_b32 v69, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v57, s24 :: v_dual_mov_b32 v44, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB63_3
+; GFX11-TRUE16-NEXT: .LBB63_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s27, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s27, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s26, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s26, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s25, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s25, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s24, 0xffff0000
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v8 :: v_dual_add_nc_u32 v7, v7, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s24, 16
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v2, v7, v2 :: v_dual_add_nc_u32 v7, v8, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v9, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v3, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v1.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 16, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v44.h, v4.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s23, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v57.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s23, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s22, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s22, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s21, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v80.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s21, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s20, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v90.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s20, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v99.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s19, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v107.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v114.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s17, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v120.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s16, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v125, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v125.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v132.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v135.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v167
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v167
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v167, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v167.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v176
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v176
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v176, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v176.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v177
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v177
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v177, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v177.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v178
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v178
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v178, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v178.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v179
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v179
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v179, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v179.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v180
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v180
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v180, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v180.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v181
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v181
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v181.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v182
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v182
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v182.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v183
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v183
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v183, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v183.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v168
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v168
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v168, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v168.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v169
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v169
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v169, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v169.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v170
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v170
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v170, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v170.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v171
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v171
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v171, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v171.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v172
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v172
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v172, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v172.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v173
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v173
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v173, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v173.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v174
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v174
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v174, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v174.h, v0.l
+; GFX11-TRUE16-NEXT: .LBB63_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, v125 :: v_dual_mov_b32 v5, v120
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v114 :: v_dual_mov_b32 v7, v107
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, v99 :: v_dual_mov_b32 v9, v90
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, v57 :: v_dual_mov_b32 v13, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v30 :: v_dual_mov_b32 v17, v173
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v174 :: v_dual_mov_b32 v19, v171
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v172 :: v_dual_mov_b32 v21, v169
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v170 :: v_dual_mov_b32 v23, v183
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v168 :: v_dual_mov_b32 v25, v181
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0x6
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:280
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v135 :: v_dual_mov_b32 v1, v134
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, v132 :: v_dual_mov_b32 v3, v129
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, v80 :: v_dual_mov_b32 v11, v69
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v182 :: v_dual_mov_b32 v27, v179
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v180 :: v_dual_mov_b32 v29, v177
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v178 :: v_dual_mov_b32 v31, v167
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v30, v176
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB63_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166
+; GFX11-TRUE16-NEXT: s_branch .LBB63_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v64bf16_to_v16i64_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:288
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:284
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:280
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v43, s32 offset:276
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v44, s32 offset:272
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v45, s32 offset:268
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v46, s32 offset:264
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v47, s32 offset:260
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v56, s32 offset:256
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v57, s32 offset:252
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v58, s32 offset:248
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v59, s32 offset:244
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v60, s32 offset:240
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v61, s32 offset:236
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v62, s32 offset:232
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v63, s32 offset:228
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v72, s32 offset:224
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v73, s32 offset:220
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v74, s32 offset:216
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v75, s32 offset:212
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v76, s32 offset:208
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v77, s32 offset:204
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v78, s32 offset:200
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v79, s32 offset:196
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v88, s32 offset:192
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v89, s32 offset:188
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v90, s32 offset:184
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v91, s32 offset:180
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v92, s32 offset:176
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v93, s32 offset:172
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v94, s32 offset:168
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v95, s32 offset:164
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v104, s32 offset:160
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v105, s32 offset:156
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v106, s32 offset:152
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v107, s32 offset:148
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v108, s32 offset:144
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v109, s32 offset:140
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v110, s32 offset:136
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v111, s32 offset:132
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v120, s32 offset:128
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v121, s32 offset:124
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v122, s32 offset:120
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v123, s32 offset:116
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v124, s32 offset:112
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v125, s32 offset:108
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v126, s32 offset:104
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v127, s32 offset:100
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v136, s32 offset:96
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v137, s32 offset:92
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v138, s32 offset:88
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v139, s32 offset:84
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v140, s32 offset:80
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v141, s32 offset:76
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v142, s32 offset:72
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v143, s32 offset:68
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v152, s32 offset:64
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v153, s32 offset:60
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v154, s32 offset:56
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v155, s32 offset:52
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v156, s32 offset:48
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v157, s32 offset:44
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v158, s32 offset:40
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v159, s32 offset:36
+; GFX11-FAKE16-NEXT: s_clause 0x8
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v168, s32 offset:32
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v169, s32 offset:28
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v170, s32 offset:24
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v171, s32 offset:20
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v172, s32 offset:16
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v173, s32 offset:12
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v174, s32 offset:8
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v175, s32 offset:4
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v184, s32
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v178, v13 :: v_dual_mov_b32 v179, v12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v180, v11 :: v_dual_mov_b32 v181, v9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v182, v10 :: v_dual_mov_b32 v169, v7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v170, v8 :: v_dual_mov_b32 v177, v3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v176, v6 :: v_dual_mov_b32 v171, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v174, v5 :: v_dual_mov_b32 v173, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v184, v2 :: v_dual_mov_b32 v175, v1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v183, s28 :: v_dual_mov_b32 v172, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB63_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s0 :: v_dual_mov_b32 v37, s2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s1 :: v_dual_mov_b32 v41, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v46, s16 :: v_dual_mov_b32 v59, s18
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v52, s17 :: v_dual_mov_b32 v67, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v76, s20 :: v_dual_mov_b32 v97, s22
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v86, s21 :: v_dual_mov_b32 v109, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v122, s24 :: v_dual_mov_b32 v151, s26
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v136, s25 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB63_3
+; GFX11-FAKE16-NEXT: .LBB63_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s27, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s27, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s26, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s26, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v3, 16, 1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s25, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s25, 0xffff0000
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s24, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v51, 0xffff0000, v183
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v9, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v3, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v10, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v7, v9, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s24, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s23, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v6, v1, v7 :: v_dual_and_b32 v1, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v7, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s23, 16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v151, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s22, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v11, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v6, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s22, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v9, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v14, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s21, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v6, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v14, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v7
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v8, v8, v13 :: v_dual_add_nc_u32 v7, v9, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s21, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v9, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v16, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s20, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v11, v7, v14 :: v_dual_add_nc_u32 v10, v10, v13
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v12, v16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s20, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_bfe_u32 v18, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v10, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v18, v12
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v19, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v11, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s19, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v16, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v21, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v19
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v13, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v18, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, v21, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v13
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v14, v14, v20 :: v_dual_add_nc_u32 v13, v16, v18
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v17
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v20, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s18, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v18
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v22, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v16, v16, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v17, v20, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v14
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v22, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, v17, v20
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v18, v13, v21 :: v_dual_and_b32 v13, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, v19, v22
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s17, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v23, 0x400000, v22
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
+; GFX11-FAKE16-NEXT: v_bfe_u32 v24, v19, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v25, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v17, v21, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, v24, v19
+; GFX11-FAKE16-NEXT: v_bfe_u32 v22, v25, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v18, v18, v23 :: v_dual_and_b32 v17, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v23, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s16, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, v22, v25
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v22, 0x400000, v19
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v24, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v27, v23, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v21
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v26, 0x400000, v25
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v20, v22, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v22, v24, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v25, v27, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v20
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v21, v21, v26 :: v_dual_add_nc_u32 v20, v22, v24
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v22, 0x7fff, v25
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v23
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v26, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v24
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v28, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v22, v22, v25, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v23, v26, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v21
+; GFX11-FAKE16-NEXT: v_bfe_u32 v25, v28, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v24, v20, v27 :: v_dual_add_nc_u32 v23, v23, v26
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v22
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v26
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, v25, v28
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v25, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v29, 0x400000, v28
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, 0x7fff, v24
+; GFX11-FAKE16-NEXT: v_bfe_u32 v30, v25, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v31, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v26, v23, v27, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, v30, v25
+; GFX11-FAKE16-NEXT: v_bfe_u32 v28, v31, 16, 1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v24, v24, v29, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v29, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v26
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v27
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, v28, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v28, 0x400000, v25
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v30, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v33, v29, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, 0x7fff, v27
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v32, 0x400000, v31
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v26, v26, v28, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v28, v30, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v33, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v26
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v27, v27, v32 :: v_dual_add_nc_u32 v26, v28, v30
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v31, 0x400000, v29
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v32, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v26
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v33, 0x400000, v30
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v34, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v28, v28, v31, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v29, v32, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v35, 0x400000, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v31, v34, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v34
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v27
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v30, v26, v33, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v28
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, v29, v32
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v33, 16, v178
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v30
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v30, v31, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff0000, v178
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v28
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v33, 0x40c00000, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v109, v5, 16, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v30, 0x7fff, v30
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v31, 0x40c00000, v31
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v28, v35, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v33, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v29
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v31, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, v37, v33
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v30, v30, v36, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v36, 16, v179
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v35, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v37, 0x400000, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v179
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v38, 0x400000, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v32, v37, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v180
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v34, v38, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v34, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v33, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v38, 16, v180
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, v33, v35
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v36, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v30
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v178, v31, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v36, v37
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v33, v33, v48 :: v_dual_lshlrev_b32 v36, 16, v182
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, v35, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v182
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v179, v32, 16, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v30, 0xffff, v30
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v136, v2, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v33, v33, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v31, v31, v48 :: v_dual_add_nc_u32 v38, v38, v35
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, v37, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v181
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v38
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v38, 16, v181
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v32, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v33
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v36, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v180, v31, 16, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, v35, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v170
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v36, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_lshlrev_b32 v36, 16, v170
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v39, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v182, v31, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, v38, v35
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v39, v36
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v33, v33, v48, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v38, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v169
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v31, v38, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v39, 16, v169
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v181, v32, 16, v33
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff0000, v176
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v39
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v32, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v35, v37
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v35, 16, v176
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, v32, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v49, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v170, v33, 16, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v49, v35
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v32, v32, v48 :: v_dual_add_nc_u32 v33, v37, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v174
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v35, 0x40c00000, v37 :: v_dual_cndmask_b32 v34, v34, v36
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v36, 16, v174
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v36, 0x40c00000, v36 :: v_dual_cndmask_b32 v33, v33, v39
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v169, v31, 16, v32
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, v37, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v31, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v171
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v38, 16, v177
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v31, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v176, v33, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v37
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v34, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v32, 0x40c00000, v32 :: v_dual_lshlrev_b32 v37, 16, v171
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v33, v33, v34, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v34, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v32, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v50, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v31, v31, v34 :: v_dual_add_nc_u32 v36, v37, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v34, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v177
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v35
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v36, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v49, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v50, v38
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v50, 16, v184
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v34, v34, v48 :: v_dual_add_nc_u32 v35, v49, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff0000, v184
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v35, 0x7fff, v35
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v49, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v36, v36, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v50
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v38, 0x40c00000, v48 :: v_dual_cndmask_b32 v35, v35, v49
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v48, v37, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v39, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v174, v33, 16, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v171, v32, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v48, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff0000, v175
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v34, 16, v175
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v39, v39, v38
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v177, v35, 16, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v35, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v33, 0x40c00000, v33 :: v_dual_add_f32 v34, 0x40c00000, v34
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v39
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v33, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v39, v34, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v31, v35, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v173
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v48, 16, v173
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v49, 0x400000, v33
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_cndmask_b32 v32, v32, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v37, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, v39, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v37
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v38, v38, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v122, v3, 16, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v37, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v38
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v48
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v33, v36, v49 :: v_dual_lshlrev_b32 v48, 16, v183
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v36, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v49, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v48, 0x40c00000, v48
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v35, v37, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v172
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v39, 16, v172
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v36, v38
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v55, 0x400000, v48
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v39, 0x40c00000, v39
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v50, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v39, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v36, v36, v49, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v54, 0x400000, v39
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v50, 0x40c00000, v51 :: v_dual_add_nc_u32 v49, v50, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v51, v48, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v38, v38, v39
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v53, 0x400000, v37
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v49, 0x7fff, v49
+; GFX11-FAKE16-NEXT: v_bfe_u32 v52, v50, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v51, v51, v48
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v38, 0x7fff, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v52, v52, v50
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v38, v38, v54 :: v_dual_add_nc_u32 v51, 0x7fff, v51
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v39, 0x7fff, v52
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v52, 0x400000, v50
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v38
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v48, v51, v55, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v184, v32, 16, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v175, v33, 16, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v48
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v37, v49, v53, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v173, v35, 16, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v97, v8, 16, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v48
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v37
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v39, v39, v52, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v86, v9, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v76, v11, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v67, v14, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v172, v37, 16, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v39
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v59, v16, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v52, v18, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v46, v21, 16, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v41, v22, 16, v25
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v183, v39, 16, v48
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v37, v24, 16, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v34, v26, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v29, 16, v30
+; GFX11-FAKE16-NEXT: .LBB63_3: ; %end
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v41 :: v_dual_mov_b32 v4, v46
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v59 :: v_dual_mov_b32 v9, v86
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, v67 :: v_dual_mov_b32 v8, v76
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, v97 :: v_dual_mov_b32 v13, v136
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, v109 :: v_dual_mov_b32 v12, v122
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, v151 :: v_dual_mov_b32 v17, v172
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, v173 :: v_dual_mov_b32 v19, v175
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, v184 :: v_dual_mov_b32 v23, v174
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, v171 :: v_dual_mov_b32 v25, v169
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, v170 :: v_dual_mov_b32 v29, v180
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_load_b32 v184, off, s32
+; GFX11-FAKE16-NEXT: scratch_load_b32 v175, off, s32 offset:4
+; GFX11-FAKE16-NEXT: scratch_load_b32 v174, off, s32 offset:8
+; GFX11-FAKE16-NEXT: scratch_load_b32 v173, off, s32 offset:12
+; GFX11-FAKE16-NEXT: scratch_load_b32 v172, off, s32 offset:16
+; GFX11-FAKE16-NEXT: scratch_load_b32 v171, off, s32 offset:20
+; GFX11-FAKE16-NEXT: scratch_load_b32 v170, off, s32 offset:24
+; GFX11-FAKE16-NEXT: scratch_load_b32 v169, off, s32 offset:28
+; GFX11-FAKE16-NEXT: scratch_load_b32 v168, off, s32 offset:32
+; GFX11-FAKE16-NEXT: scratch_load_b32 v159, off, s32 offset:36
+; GFX11-FAKE16-NEXT: scratch_load_b32 v158, off, s32 offset:40
+; GFX11-FAKE16-NEXT: scratch_load_b32 v157, off, s32 offset:44
+; GFX11-FAKE16-NEXT: scratch_load_b32 v156, off, s32 offset:48
+; GFX11-FAKE16-NEXT: scratch_load_b32 v155, off, s32 offset:52
+; GFX11-FAKE16-NEXT: scratch_load_b32 v154, off, s32 offset:56
+; GFX11-FAKE16-NEXT: scratch_load_b32 v153, off, s32 offset:60
+; GFX11-FAKE16-NEXT: scratch_load_b32 v152, off, s32 offset:64
+; GFX11-FAKE16-NEXT: scratch_load_b32 v143, off, s32 offset:68
+; GFX11-FAKE16-NEXT: scratch_load_b32 v142, off, s32 offset:72
+; GFX11-FAKE16-NEXT: scratch_load_b32 v141, off, s32 offset:76
+; GFX11-FAKE16-NEXT: scratch_load_b32 v140, off, s32 offset:80
+; GFX11-FAKE16-NEXT: scratch_load_b32 v139, off, s32 offset:84
+; GFX11-FAKE16-NEXT: scratch_load_b32 v138, off, s32 offset:88
+; GFX11-FAKE16-NEXT: scratch_load_b32 v137, off, s32 offset:92
+; GFX11-FAKE16-NEXT: scratch_load_b32 v136, off, s32 offset:96
+; GFX11-FAKE16-NEXT: scratch_load_b32 v127, off, s32 offset:100
+; GFX11-FAKE16-NEXT: scratch_load_b32 v126, off, s32 offset:104
+; GFX11-FAKE16-NEXT: scratch_load_b32 v125, off, s32 offset:108
+; GFX11-FAKE16-NEXT: scratch_load_b32 v124, off, s32 offset:112
+; GFX11-FAKE16-NEXT: scratch_load_b32 v123, off, s32 offset:116
+; GFX11-FAKE16-NEXT: scratch_load_b32 v122, off, s32 offset:120
+; GFX11-FAKE16-NEXT: scratch_load_b32 v121, off, s32 offset:124
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_load_b32 v120, off, s32 offset:128
+; GFX11-FAKE16-NEXT: scratch_load_b32 v111, off, s32 offset:132
+; GFX11-FAKE16-NEXT: scratch_load_b32 v110, off, s32 offset:136
+; GFX11-FAKE16-NEXT: scratch_load_b32 v109, off, s32 offset:140
+; GFX11-FAKE16-NEXT: scratch_load_b32 v108, off, s32 offset:144
+; GFX11-FAKE16-NEXT: scratch_load_b32 v107, off, s32 offset:148
+; GFX11-FAKE16-NEXT: scratch_load_b32 v106, off, s32 offset:152
+; GFX11-FAKE16-NEXT: scratch_load_b32 v105, off, s32 offset:156
+; GFX11-FAKE16-NEXT: scratch_load_b32 v104, off, s32 offset:160
+; GFX11-FAKE16-NEXT: scratch_load_b32 v95, off, s32 offset:164
+; GFX11-FAKE16-NEXT: scratch_load_b32 v94, off, s32 offset:168
+; GFX11-FAKE16-NEXT: scratch_load_b32 v93, off, s32 offset:172
+; GFX11-FAKE16-NEXT: scratch_load_b32 v92, off, s32 offset:176
+; GFX11-FAKE16-NEXT: scratch_load_b32 v91, off, s32 offset:180
+; GFX11-FAKE16-NEXT: scratch_load_b32 v90, off, s32 offset:184
+; GFX11-FAKE16-NEXT: scratch_load_b32 v89, off, s32 offset:188
+; GFX11-FAKE16-NEXT: scratch_load_b32 v88, off, s32 offset:192
+; GFX11-FAKE16-NEXT: scratch_load_b32 v79, off, s32 offset:196
+; GFX11-FAKE16-NEXT: scratch_load_b32 v78, off, s32 offset:200
+; GFX11-FAKE16-NEXT: scratch_load_b32 v77, off, s32 offset:204
+; GFX11-FAKE16-NEXT: scratch_load_b32 v76, off, s32 offset:208
+; GFX11-FAKE16-NEXT: scratch_load_b32 v75, off, s32 offset:212
+; GFX11-FAKE16-NEXT: scratch_load_b32 v74, off, s32 offset:216
+; GFX11-FAKE16-NEXT: scratch_load_b32 v73, off, s32 offset:220
+; GFX11-FAKE16-NEXT: scratch_load_b32 v72, off, s32 offset:224
+; GFX11-FAKE16-NEXT: scratch_load_b32 v63, off, s32 offset:228
+; GFX11-FAKE16-NEXT: scratch_load_b32 v62, off, s32 offset:232
+; GFX11-FAKE16-NEXT: scratch_load_b32 v61, off, s32 offset:236
+; GFX11-FAKE16-NEXT: scratch_load_b32 v60, off, s32 offset:240
+; GFX11-FAKE16-NEXT: scratch_load_b32 v59, off, s32 offset:244
+; GFX11-FAKE16-NEXT: scratch_load_b32 v58, off, s32 offset:248
+; GFX11-FAKE16-NEXT: scratch_load_b32 v57, off, s32 offset:252
+; GFX11-FAKE16-NEXT: s_clause 0x8
+; GFX11-FAKE16-NEXT: scratch_load_b32 v56, off, s32 offset:256
+; GFX11-FAKE16-NEXT: scratch_load_b32 v47, off, s32 offset:260
+; GFX11-FAKE16-NEXT: scratch_load_b32 v46, off, s32 offset:264
+; GFX11-FAKE16-NEXT: scratch_load_b32 v45, off, s32 offset:268
+; GFX11-FAKE16-NEXT: scratch_load_b32 v44, off, s32 offset:272
+; GFX11-FAKE16-NEXT: scratch_load_b32 v43, off, s32 offset:276
+; GFX11-FAKE16-NEXT: scratch_load_b32 v42, off, s32 offset:280
+; GFX11-FAKE16-NEXT: scratch_load_b32 v41, off, s32 offset:284
+; GFX11-FAKE16-NEXT: scratch_load_b32 v40, off, s32 offset:288
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v32 :: v_dual_mov_b32 v1, v34
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v37 :: v_dual_mov_b32 v5, v52
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, v183 :: v_dual_mov_b32 v21, v177
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, v176 :: v_dual_mov_b32 v27, v181
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v28, v182
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, v179 :: v_dual_mov_b32 v31, v178
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB63_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168
+; GFX11-FAKE16-NEXT: s_branch .LBB63_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -136197,870 +139119,1844 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX9-NEXT: .LBB79_4:
; GFX9-NEXT: s_branch .LBB79_2
;
-; GFX11-LABEL: bitcast_v64bf16_to_v16f64_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:288
-; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:284
-; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:280
-; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:276
-; GFX11-NEXT: scratch_store_b32 off, v44, s32 offset:272
-; GFX11-NEXT: scratch_store_b32 off, v45, s32 offset:268
-; GFX11-NEXT: scratch_store_b32 off, v46, s32 offset:264
-; GFX11-NEXT: scratch_store_b32 off, v47, s32 offset:260
-; GFX11-NEXT: scratch_store_b32 off, v56, s32 offset:256
-; GFX11-NEXT: scratch_store_b32 off, v57, s32 offset:252
-; GFX11-NEXT: scratch_store_b32 off, v58, s32 offset:248
-; GFX11-NEXT: scratch_store_b32 off, v59, s32 offset:244
-; GFX11-NEXT: scratch_store_b32 off, v60, s32 offset:240
-; GFX11-NEXT: scratch_store_b32 off, v61, s32 offset:236
-; GFX11-NEXT: scratch_store_b32 off, v62, s32 offset:232
-; GFX11-NEXT: scratch_store_b32 off, v63, s32 offset:228
-; GFX11-NEXT: scratch_store_b32 off, v72, s32 offset:224
-; GFX11-NEXT: scratch_store_b32 off, v73, s32 offset:220
-; GFX11-NEXT: scratch_store_b32 off, v74, s32 offset:216
-; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:212
-; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:208
-; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:204
-; GFX11-NEXT: scratch_store_b32 off, v78, s32 offset:200
-; GFX11-NEXT: scratch_store_b32 off, v79, s32 offset:196
-; GFX11-NEXT: scratch_store_b32 off, v88, s32 offset:192
-; GFX11-NEXT: scratch_store_b32 off, v89, s32 offset:188
-; GFX11-NEXT: scratch_store_b32 off, v90, s32 offset:184
-; GFX11-NEXT: scratch_store_b32 off, v91, s32 offset:180
-; GFX11-NEXT: scratch_store_b32 off, v92, s32 offset:176
-; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:172
-; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:168
-; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:164
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:160
-; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:156
-; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:152
-; GFX11-NEXT: scratch_store_b32 off, v107, s32 offset:148
-; GFX11-NEXT: scratch_store_b32 off, v108, s32 offset:144
-; GFX11-NEXT: scratch_store_b32 off, v109, s32 offset:140
-; GFX11-NEXT: scratch_store_b32 off, v110, s32 offset:136
-; GFX11-NEXT: scratch_store_b32 off, v111, s32 offset:132
-; GFX11-NEXT: scratch_store_b32 off, v120, s32 offset:128
-; GFX11-NEXT: scratch_store_b32 off, v121, s32 offset:124
-; GFX11-NEXT: scratch_store_b32 off, v122, s32 offset:120
-; GFX11-NEXT: scratch_store_b32 off, v123, s32 offset:116
-; GFX11-NEXT: scratch_store_b32 off, v124, s32 offset:112
-; GFX11-NEXT: scratch_store_b32 off, v125, s32 offset:108
-; GFX11-NEXT: scratch_store_b32 off, v126, s32 offset:104
-; GFX11-NEXT: scratch_store_b32 off, v127, s32 offset:100
-; GFX11-NEXT: scratch_store_b32 off, v136, s32 offset:96
-; GFX11-NEXT: scratch_store_b32 off, v137, s32 offset:92
-; GFX11-NEXT: scratch_store_b32 off, v138, s32 offset:88
-; GFX11-NEXT: scratch_store_b32 off, v139, s32 offset:84
-; GFX11-NEXT: scratch_store_b32 off, v140, s32 offset:80
-; GFX11-NEXT: scratch_store_b32 off, v141, s32 offset:76
-; GFX11-NEXT: scratch_store_b32 off, v142, s32 offset:72
-; GFX11-NEXT: scratch_store_b32 off, v143, s32 offset:68
-; GFX11-NEXT: scratch_store_b32 off, v152, s32 offset:64
-; GFX11-NEXT: scratch_store_b32 off, v153, s32 offset:60
-; GFX11-NEXT: scratch_store_b32 off, v154, s32 offset:56
-; GFX11-NEXT: scratch_store_b32 off, v155, s32 offset:52
-; GFX11-NEXT: scratch_store_b32 off, v156, s32 offset:48
-; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:44
-; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:40
-; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:36
-; GFX11-NEXT: s_clause 0x8
-; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:32
-; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:28
-; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:24
-; GFX11-NEXT: scratch_store_b32 off, v171, s32 offset:20
-; GFX11-NEXT: scratch_store_b32 off, v172, s32 offset:16
-; GFX11-NEXT: scratch_store_b32 off, v173, s32 offset:12
-; GFX11-NEXT: scratch_store_b32 off, v174, s32 offset:8
-; GFX11-NEXT: scratch_store_b32 off, v175, s32 offset:4
-; GFX11-NEXT: scratch_store_b32 off, v184, s32
-; GFX11-NEXT: v_dual_mov_b32 v178, v13 :: v_dual_mov_b32 v179, v12
-; GFX11-NEXT: v_dual_mov_b32 v180, v11 :: v_dual_mov_b32 v181, v9
-; GFX11-NEXT: v_dual_mov_b32 v182, v10 :: v_dual_mov_b32 v169, v7
-; GFX11-NEXT: v_dual_mov_b32 v170, v8 :: v_dual_mov_b32 v177, v3
-; GFX11-NEXT: v_dual_mov_b32 v176, v6 :: v_dual_mov_b32 v171, v4
-; GFX11-NEXT: v_dual_mov_b32 v174, v5 :: v_dual_mov_b32 v173, v0
-; GFX11-NEXT: v_dual_mov_b32 v184, v2 :: v_dual_mov_b32 v175, v1
-; GFX11-NEXT: v_dual_mov_b32 v183, s28 :: v_dual_mov_b32 v172, s29
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB79_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_dual_mov_b32 v32, s0 :: v_dual_mov_b32 v37, s2
-; GFX11-NEXT: v_dual_mov_b32 v34, s1 :: v_dual_mov_b32 v41, s3
-; GFX11-NEXT: v_dual_mov_b32 v46, s16 :: v_dual_mov_b32 v59, s18
-; GFX11-NEXT: v_dual_mov_b32 v52, s17 :: v_dual_mov_b32 v67, s19
-; GFX11-NEXT: v_dual_mov_b32 v76, s20 :: v_dual_mov_b32 v97, s22
-; GFX11-NEXT: v_dual_mov_b32 v86, s21 :: v_dual_mov_b32 v109, s23
-; GFX11-NEXT: v_dual_mov_b32 v122, s24 :: v_dual_mov_b32 v151, s26
-; GFX11-NEXT: v_dual_mov_b32 v136, s25 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB79_3
-; GFX11-NEXT: .LBB79_2: ; %cmp.true
-; GFX11-NEXT: s_and_b32 s5, s27, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s4, s27, 16
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s6, s26, 16
-; GFX11-NEXT: s_and_b32 s4, s26, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v1
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_bfe_u32 v9, v3, 16, 1
-; GFX11-NEXT: s_lshl_b32 s7, s25, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_and_b32 s5, s25, 0xffff0000
-; GFX11-NEXT: s_and_b32 s4, s24, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s5
-; GFX11-NEXT: v_and_b32_e32 v51, 0xffff0000, v183
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v9, v3
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_bfe_u32 v10, v6, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v7, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s7
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_bfe_u32 v3, v8, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v3, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v10, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v7, v9, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: s_lshl_b32 s4, s24, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v6
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
-; GFX11-NEXT: s_and_b32 s4, s23, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_dual_cndmask_b32 v6, v1, v7 :: v_dual_and_b32 v1, 0xffff, v2
-; GFX11-NEXT: v_bfe_u32 v7, v9, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v7, v9
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s23, 16
-; GFX11-NEXT: v_lshl_or_b32 v151, v0, 16, v1
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_bfe_u32 v11, v7, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v4, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_and_b32 s4, s22, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v9, v12, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v11, v7
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v6, v10, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s22, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v5
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s4
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v9, v12
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v7
-; GFX11-NEXT: v_bfe_u32 v14, v10, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
-; GFX11-NEXT: s_and_b32 s4, s21, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v6, v9, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v9, v11, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v14, v10
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v7
-; GFX11-NEXT: v_dual_cndmask_b32 v8, v8, v13 :: v_dual_add_nc_u32 v7, v9, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v12
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v10
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_lshl_b32 s4, s21, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v11
-; GFX11-NEXT: v_add_f32_e64 v16, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v9, v12, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v10, v13, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX11-NEXT: v_bfe_u32 v12, v16, 16, 1
-; GFX11-NEXT: s_and_b32 s4, s20, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v11, v7, v14 :: v_dual_add_nc_u32 v10, v10, v13
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v9
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v12, v16
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: s_lshl_b32 s4, s20, 16
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
-; GFX11-NEXT: v_bfe_u32 v18, v12, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v10, v14, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-NEXT: s_and_b32 s4, s19, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v18, v12
-; GFX11-NEXT: v_bfe_u32 v16, v19, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v9
-; GFX11-NEXT: v_cndmask_b32_e32 v11, v11, v17, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s19, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v16, v19
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v12
-; GFX11-NEXT: v_add_f32_e64 v18, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v21, v17, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
-; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v19
-; GFX11-NEXT: s_and_b32 s4, s18, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v13, v16, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v16, v18, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v19, v21, v17
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v13
-; GFX11-NEXT: v_dual_cndmask_b32 v14, v14, v20 :: v_dual_add_nc_u32 v13, v16, v18
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v19
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v17
-; GFX11-NEXT: v_add_f32_e64 v20, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: s_lshl_b32 s4, s18, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
-; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v18
-; GFX11-NEXT: v_add_f32_e64 v22, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v16, v16, v19, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v17, v20, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v14
-; GFX11-NEXT: v_bfe_u32 v19, v22, 16, 1
-; GFX11-NEXT: s_and_b32 s4, s17, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v17, v17, v20
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v18, v13, v21 :: v_dual_and_b32 v13, 0xffff, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v16
-; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v18, v19, v22
-; GFX11-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
-; GFX11-NEXT: s_lshl_b32 s4, s17, 16
-; GFX11-NEXT: v_or_b32_e32 v23, 0x400000, v22
-; GFX11-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
-; GFX11-NEXT: v_bfe_u32 v24, v19, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v25, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v17, v21, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
-; GFX11-NEXT: s_and_b32 s4, s16, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v21, v24, v19
-; GFX11-NEXT: v_bfe_u32 v22, v25, 16, 1
-; GFX11-NEXT: v_dual_cndmask_b32 v18, v18, v23 :: v_dual_and_b32 v17, 0xffff, v16
-; GFX11-NEXT: v_add_f32_e64 v23, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s16, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v21, v22, v25
-; GFX11-NEXT: v_or_b32_e32 v22, 0x400000, v19
-; GFX11-NEXT: v_add_f32_e64 v24, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v27, v23, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v21
-; GFX11-NEXT: v_or_b32_e32 v26, 0x400000, v25
-; GFX11-NEXT: s_and_b32 s4, s3, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v20, v22, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v22, v24, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
-; GFX11-NEXT: v_add_nc_u32_e32 v25, v27, v23
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v20
-; GFX11-NEXT: v_dual_cndmask_b32 v21, v21, v26 :: v_dual_add_nc_u32 v20, v22, v24
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v22, 0x7fff, v25
-; GFX11-NEXT: v_or_b32_e32 v25, 0x400000, v23
-; GFX11-NEXT: v_add_f32_e64 v26, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
-; GFX11-NEXT: v_or_b32_e32 v27, 0x400000, v24
-; GFX11-NEXT: v_add_f32_e64 v28, 0x40c00000, s3
-; GFX11-NEXT: v_cndmask_b32_e32 v22, v22, v25, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v23, v26, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v21
-; GFX11-NEXT: v_bfe_u32 v25, v28, 16, 1
-; GFX11-NEXT: s_and_b32 s3, s2, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v24, v20, v27 :: v_dual_add_nc_u32 v23, v23, v26
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v22
-; GFX11-NEXT: v_or_b32_e32 v27, 0x400000, v26
-; GFX11-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v24, v25, v28
-; GFX11-NEXT: v_add_f32_e64 v25, 0x40c00000, s3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_or_b32_e32 v29, 0x400000, v28
-; GFX11-NEXT: v_add_nc_u32_e32 v24, 0x7fff, v24
-; GFX11-NEXT: v_bfe_u32 v30, v25, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v31, 0x40c00000, s2
-; GFX11-NEXT: v_cndmask_b32_e32 v26, v23, v27, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
-; GFX11-NEXT: s_and_b32 s2, s1, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v27, v30, v25
-; GFX11-NEXT: v_bfe_u32 v28, v31, 16, 1
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v24, v24, v29, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v29, 0x40c00000, s2
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v26
-; GFX11-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v27
-; GFX11-NEXT: v_add_nc_u32_e32 v27, v28, v31
-; GFX11-NEXT: v_or_b32_e32 v28, 0x400000, v25
-; GFX11-NEXT: v_add_f32_e64 v30, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v33, v29, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v27, 0x7fff, v27
-; GFX11-NEXT: v_or_b32_e32 v32, 0x400000, v31
-; GFX11-NEXT: s_and_b32 s1, s0, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v26, v26, v28, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v28, v30, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v33, v29
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v26
-; GFX11-NEXT: v_dual_cndmask_b32 v27, v27, v32 :: v_dual_add_nc_u32 v26, v28, v30
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v31
-; GFX11-NEXT: v_or_b32_e32 v31, 0x400000, v29
-; GFX11-NEXT: v_add_f32_e64 v32, 0x40c00000, s1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v26
-; GFX11-NEXT: v_or_b32_e32 v33, 0x400000, v30
-; GFX11-NEXT: v_add_f32_e64 v34, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v28, v28, v31, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v29, v32, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
-; GFX11-NEXT: v_or_b32_e32 v35, 0x400000, v32
-; GFX11-NEXT: v_bfe_u32 v31, v34, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v34
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v27
-; GFX11-NEXT: v_cndmask_b32_e32 v30, v26, v33, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v28
-; GFX11-NEXT: v_add_nc_u32_e32 v28, v29, v32
-; GFX11-NEXT: v_lshlrev_b32_e32 v33, 16, v178
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v30
-; GFX11-NEXT: v_add_nc_u32_e32 v30, v31, v34
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff0000, v178
-; GFX11-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v28
-; GFX11-NEXT: v_add_f32_e32 v33, 0x40c00000, v33
-; GFX11-NEXT: v_lshl_or_b32 v109, v5, 16, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v30, 0x7fff, v30
-; GFX11-NEXT: v_add_f32_e32 v31, 0x40c00000, v31
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v28, v35, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v37, v33, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v29
-; GFX11-NEXT: v_bfe_u32 v35, v31, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v32, v37, v33
-; GFX11-NEXT: v_cndmask_b32_e32 v30, v30, v36, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v179
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v35, v31
-; GFX11-NEXT: v_or_b32_e32 v37, 0x400000, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v179
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_or_b32_e32 v38, 0x400000, v31
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v32, v37, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v180
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v34, v38, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v34, v36, 16, 1
-; GFX11-NEXT: v_bfe_u32 v33, v35, 16, 1
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v180
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v33, v33, v35
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-NEXT: v_bfe_u32 v36, v37, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v30
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_bfe_u32 v35, v38, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v178, v31, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v36, v37
-; GFX11-NEXT: v_dual_cndmask_b32 v33, v33, v48 :: v_dual_lshlrev_b32 v36, 16, v182
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v33, v35, v38
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v182
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
-; GFX11-NEXT: v_lshl_or_b32 v179, v32, 16, v34
-; GFX11-NEXT: v_and_b32_e32 v30, 0xffff, v30
-; GFX11-NEXT: v_lshl_or_b32 v136, v2, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v33, v33, v39, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v38, v35, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_bfe_u32 v37, v36, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: v_dual_cndmask_b32 v31, v31, v48 :: v_dual_add_nc_u32 v38, v38, v35
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v32, v37, v36
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v181
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v181
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v32, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff, v33
-; GFX11-NEXT: v_bfe_u32 v35, v37, 16, 1
-; GFX11-NEXT: v_bfe_u32 v36, v38, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v180, v31, 16, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v33, v35, v37
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v170
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v36, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_lshlrev_b32 v36, 16, v170
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v37
-; GFX11-NEXT: v_bfe_u32 v38, v35, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v39, v36, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_lshl_or_b32 v182, v31, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v37, v38, v35
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v39, v36
-; GFX11-NEXT: v_cndmask_b32_e32 v33, v33, v48, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v38, 0x400000, v36
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v33
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v37
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v169
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v31, v38, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_lshlrev_b32_e32 v39, 16, v169
-; GFX11-NEXT: v_lshl_or_b32 v181, v32, 16, v33
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
-; GFX11-NEXT: v_and_b32_e32 v38, 0xffff0000, v176
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v39
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_bfe_u32 v35, v37, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v37
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v34
-; GFX11-NEXT: v_bfe_u32 v32, v36, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v35, v37
-; GFX11-NEXT: v_lshlrev_b32_e32 v35, 16, v176
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v32, v32, v36
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
-; GFX11-NEXT: v_bfe_u32 v37, v38, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v49, v35, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-NEXT: v_lshl_or_b32 v170, v33, 16, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v49, v35
-; GFX11-NEXT: v_dual_cndmask_b32 v32, v32, v48 :: v_dual_add_nc_u32 v33, v37, v38
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v174
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v36
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v35
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_dual_add_f32 v35, 0x40c00000, v37 :: v_dual_cndmask_b32 v34, v34, v36
-; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v174
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v37, v35, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_dual_add_f32 v36, 0x40c00000, v36 :: v_dual_cndmask_b32 v33, v33, v39
-; GFX11-NEXT: v_lshl_or_b32 v169, v31, 16, v32
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v37, v37, v35
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v31, v36, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff0000, v171
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v177
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v31, v36
-; GFX11-NEXT: v_lshl_or_b32 v176, v33, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v37
-; GFX11-NEXT: v_or_b32_e32 v34, 0x400000, v35
-; GFX11-NEXT: v_dual_add_f32 v32, 0x40c00000, v32 :: v_dual_lshlrev_b32 v37, 16, v171
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v33, v33, v34, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v34, 0x400000, v36
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v37
-; GFX11-NEXT: v_bfe_u32 v37, v32, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v32
-; GFX11-NEXT: v_bfe_u32 v50, v38, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_dual_cndmask_b32 v31, v31, v34 :: v_dual_add_nc_u32 v36, v37, v32
-; GFX11-NEXT: v_bfe_u32 v34, v35, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v177
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v35
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v36, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_bfe_u32 v49, v37, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v50, v38
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v50, 16, v184
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v34, v34, v48 :: v_dual_add_nc_u32 v35, v49, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_and_b32_e32 v48, 0xffff0000, v184
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v35, 0x7fff, v35
-; GFX11-NEXT: v_or_b32_e32 v49, 0x400000, v37
-; GFX11-NEXT: v_cndmask_b32_e32 v36, v36, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v50
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_dual_add_f32 v38, 0x40c00000, v48 :: v_dual_cndmask_b32 v35, v35, v49
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v36
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: v_bfe_u32 v48, v37, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v39, v38, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v35
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v36
-; GFX11-NEXT: v_lshl_or_b32 v174, v33, 16, v31
-; GFX11-NEXT: v_lshl_or_b32 v171, v32, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v48, v37
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff0000, v175
-; GFX11-NEXT: v_lshlrev_b32_e32 v34, 16, v175
-; GFX11-NEXT: v_add_nc_u32_e32 v39, v39, v38
-; GFX11-NEXT: v_lshl_or_b32 v177, v35, 16, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_or_b32_e32 v35, 0x400000, v37
-; GFX11-NEXT: v_dual_add_f32 v33, 0x40c00000, v33 :: v_dual_add_f32 v34, 0x40c00000, v34
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v39
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v37, v33, 16, 1
-; GFX11-NEXT: v_bfe_u32 v39, v34, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v31, v35, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v173
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v48, 16, v173
-; GFX11-NEXT: v_or_b32_e32 v49, 0x400000, v33
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_cndmask_b32 v32, v32, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v37, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v37, v39, v34
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v38, v35, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v37
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v38, v38, v35
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_lshl_or_b32 v122, v3, 16, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v37, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v38
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v48
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v35
-; GFX11-NEXT: v_dual_cndmask_b32 v33, v36, v49 :: v_dual_lshlrev_b32 v48, 16, v183
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v36, v38, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v49, 0x400000, v38
-; GFX11-NEXT: v_add_f32_e32 v48, 0x40c00000, v48
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_cndmask_b32_e32 v35, v37, v39, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v172
-; GFX11-NEXT: v_lshlrev_b32_e32 v39, 16, v172
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v36, v38
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_or_b32_e32 v55, 0x400000, v48
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
-; GFX11-NEXT: v_add_f32_e32 v39, 0x40c00000, v39
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: v_bfe_u32 v50, v37, 16, 1
-; GFX11-NEXT: v_bfe_u32 v38, v39, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v36, v36, v49, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v54, 0x400000, v39
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39
-; GFX11-NEXT: v_dual_add_f32 v50, 0x40c00000, v51 :: v_dual_add_nc_u32 v49, v50, v37
-; GFX11-NEXT: v_bfe_u32 v51, v48, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v38, v38, v39
-; GFX11-NEXT: v_or_b32_e32 v53, 0x400000, v37
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v49, 0x7fff, v49
-; GFX11-NEXT: v_bfe_u32 v52, v50, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v51, v51, v48
-; GFX11-NEXT: v_add_nc_u32_e32 v38, 0x7fff, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v52, v52, v50
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v38, v38, v54 :: v_dual_add_nc_u32 v51, 0x7fff, v51
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v39, 0x7fff, v52
-; GFX11-NEXT: v_or_b32_e32 v52, 0x400000, v50
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v38
-; GFX11-NEXT: v_cndmask_b32_e32 v48, v51, v55, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_lshl_or_b32 v184, v32, 16, v31
-; GFX11-NEXT: v_lshl_or_b32 v175, v33, 16, v34
-; GFX11-NEXT: v_and_b32_e32 v38, 0xffff, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v48
-; GFX11-NEXT: v_cndmask_b32_e32 v37, v49, v53, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50
-; GFX11-NEXT: v_lshl_or_b32 v173, v35, 16, v36
-; GFX11-NEXT: v_lshl_or_b32 v97, v8, 16, v10
-; GFX11-NEXT: v_and_b32_e32 v48, 0xffff, v48
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v37
-; GFX11-NEXT: v_cndmask_b32_e32 v39, v39, v52, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v86, v9, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v76, v11, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v67, v14, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v172, v37, 16, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v39
-; GFX11-NEXT: v_lshl_or_b32 v59, v16, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v52, v18, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v46, v21, 16, v23
-; GFX11-NEXT: v_lshl_or_b32 v41, v22, 16, v25
-; GFX11-NEXT: v_lshl_or_b32 v183, v39, 16, v48
-; GFX11-NEXT: v_lshl_or_b32 v37, v24, 16, v27
-; GFX11-NEXT: v_lshl_or_b32 v34, v26, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v32, v29, 16, v30
-; GFX11-NEXT: .LBB79_3: ; %end
-; GFX11-NEXT: v_dual_mov_b32 v3, v41 :: v_dual_mov_b32 v4, v46
-; GFX11-NEXT: v_dual_mov_b32 v6, v59 :: v_dual_mov_b32 v9, v86
-; GFX11-NEXT: v_dual_mov_b32 v7, v67 :: v_dual_mov_b32 v8, v76
-; GFX11-NEXT: v_dual_mov_b32 v10, v97 :: v_dual_mov_b32 v13, v136
-; GFX11-NEXT: v_dual_mov_b32 v11, v109 :: v_dual_mov_b32 v12, v122
-; GFX11-NEXT: v_dual_mov_b32 v14, v151 :: v_dual_mov_b32 v17, v172
-; GFX11-NEXT: v_dual_mov_b32 v18, v173 :: v_dual_mov_b32 v19, v175
-; GFX11-NEXT: v_dual_mov_b32 v20, v184 :: v_dual_mov_b32 v23, v174
-; GFX11-NEXT: v_dual_mov_b32 v22, v171 :: v_dual_mov_b32 v25, v169
-; GFX11-NEXT: v_dual_mov_b32 v26, v170 :: v_dual_mov_b32 v29, v180
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v184, off, s32
-; GFX11-NEXT: scratch_load_b32 v175, off, s32 offset:4
-; GFX11-NEXT: scratch_load_b32 v174, off, s32 offset:8
-; GFX11-NEXT: scratch_load_b32 v173, off, s32 offset:12
-; GFX11-NEXT: scratch_load_b32 v172, off, s32 offset:16
-; GFX11-NEXT: scratch_load_b32 v171, off, s32 offset:20
-; GFX11-NEXT: scratch_load_b32 v170, off, s32 offset:24
-; GFX11-NEXT: scratch_load_b32 v169, off, s32 offset:28
-; GFX11-NEXT: scratch_load_b32 v168, off, s32 offset:32
-; GFX11-NEXT: scratch_load_b32 v159, off, s32 offset:36
-; GFX11-NEXT: scratch_load_b32 v158, off, s32 offset:40
-; GFX11-NEXT: scratch_load_b32 v157, off, s32 offset:44
-; GFX11-NEXT: scratch_load_b32 v156, off, s32 offset:48
-; GFX11-NEXT: scratch_load_b32 v155, off, s32 offset:52
-; GFX11-NEXT: scratch_load_b32 v154, off, s32 offset:56
-; GFX11-NEXT: scratch_load_b32 v153, off, s32 offset:60
-; GFX11-NEXT: scratch_load_b32 v152, off, s32 offset:64
-; GFX11-NEXT: scratch_load_b32 v143, off, s32 offset:68
-; GFX11-NEXT: scratch_load_b32 v142, off, s32 offset:72
-; GFX11-NEXT: scratch_load_b32 v141, off, s32 offset:76
-; GFX11-NEXT: scratch_load_b32 v140, off, s32 offset:80
-; GFX11-NEXT: scratch_load_b32 v139, off, s32 offset:84
-; GFX11-NEXT: scratch_load_b32 v138, off, s32 offset:88
-; GFX11-NEXT: scratch_load_b32 v137, off, s32 offset:92
-; GFX11-NEXT: scratch_load_b32 v136, off, s32 offset:96
-; GFX11-NEXT: scratch_load_b32 v127, off, s32 offset:100
-; GFX11-NEXT: scratch_load_b32 v126, off, s32 offset:104
-; GFX11-NEXT: scratch_load_b32 v125, off, s32 offset:108
-; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:112
-; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:116
-; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:120
-; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:124
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:128
-; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:132
-; GFX11-NEXT: scratch_load_b32 v110, off, s32 offset:136
-; GFX11-NEXT: scratch_load_b32 v109, off, s32 offset:140
-; GFX11-NEXT: scratch_load_b32 v108, off, s32 offset:144
-; GFX11-NEXT: scratch_load_b32 v107, off, s32 offset:148
-; GFX11-NEXT: scratch_load_b32 v106, off, s32 offset:152
-; GFX11-NEXT: scratch_load_b32 v105, off, s32 offset:156
-; GFX11-NEXT: scratch_load_b32 v104, off, s32 offset:160
-; GFX11-NEXT: scratch_load_b32 v95, off, s32 offset:164
-; GFX11-NEXT: scratch_load_b32 v94, off, s32 offset:168
-; GFX11-NEXT: scratch_load_b32 v93, off, s32 offset:172
-; GFX11-NEXT: scratch_load_b32 v92, off, s32 offset:176
-; GFX11-NEXT: scratch_load_b32 v91, off, s32 offset:180
-; GFX11-NEXT: scratch_load_b32 v90, off, s32 offset:184
-; GFX11-NEXT: scratch_load_b32 v89, off, s32 offset:188
-; GFX11-NEXT: scratch_load_b32 v88, off, s32 offset:192
-; GFX11-NEXT: scratch_load_b32 v79, off, s32 offset:196
-; GFX11-NEXT: scratch_load_b32 v78, off, s32 offset:200
-; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:204
-; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:208
-; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:212
-; GFX11-NEXT: scratch_load_b32 v74, off, s32 offset:216
-; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:220
-; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:224
-; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:228
-; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:232
-; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:236
-; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:240
-; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:244
-; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:248
-; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:252
-; GFX11-NEXT: s_clause 0x8
-; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:256
-; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:260
-; GFX11-NEXT: scratch_load_b32 v46, off, s32 offset:264
-; GFX11-NEXT: scratch_load_b32 v45, off, s32 offset:268
-; GFX11-NEXT: scratch_load_b32 v44, off, s32 offset:272
-; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:276
-; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:280
-; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:284
-; GFX11-NEXT: scratch_load_b32 v40, off, s32 offset:288
-; GFX11-NEXT: v_dual_mov_b32 v0, v32 :: v_dual_mov_b32 v1, v34
-; GFX11-NEXT: v_dual_mov_b32 v2, v37 :: v_dual_mov_b32 v5, v52
-; GFX11-NEXT: v_dual_mov_b32 v16, v183 :: v_dual_mov_b32 v21, v177
-; GFX11-NEXT: v_dual_mov_b32 v24, v176 :: v_dual_mov_b32 v27, v181
-; GFX11-NEXT: v_mov_b32_e32 v28, v182
-; GFX11-NEXT: v_dual_mov_b32 v30, v179 :: v_dual_mov_b32 v31, v178
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB79_4:
-; GFX11-NEXT: ; implicit-def: $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
-; GFX11-NEXT: ; implicit-def: $vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73
-; GFX11-NEXT: ; implicit-def: $vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78
-; GFX11-NEXT: ; implicit-def: $vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84
-; GFX11-NEXT: ; implicit-def: $vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91
-; GFX11-NEXT: ; implicit-def: $vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99
-; GFX11-NEXT: ; implicit-def: $vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108
-; GFX11-NEXT: ; implicit-def: $vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118
-; GFX11-NEXT: ; implicit-def: $vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129
-; GFX11-NEXT: ; implicit-def: $vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141
-; GFX11-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
-; GFX11-NEXT: ; implicit-def: $vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168
-; GFX11-NEXT: s_branch .LBB79_2
+; GFX11-TRUE16-LABEL: bitcast_v64bf16_to_v16f64_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:156
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:28
+; GFX11-TRUE16-NEXT: s_clause 0x6
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v167, v13 :: v_dual_mov_b32 v176, v12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v177, v11 :: v_dual_mov_b32 v178, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v179, v9 :: v_dual_mov_b32 v180, v8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v181, v7 :: v_dual_mov_b32 v182, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v183, v5 :: v_dual_mov_b32 v168, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v169, v3 :: v_dual_mov_b32 v170, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v171, v1 :: v_dual_mov_b32 v172, v0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v174, s28 :: v_dual_mov_b32 v173, s29
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB79_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v135, s0 :: v_dual_mov_b32 v134, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v132, s2 :: v_dual_mov_b32 v129, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v125, s16 :: v_dual_mov_b32 v120, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v114, s18 :: v_dual_mov_b32 v107, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v99, s20 :: v_dual_mov_b32 v90, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v80, s22 :: v_dual_mov_b32 v69, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v57, s24 :: v_dual_mov_b32 v44, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB79_3
+; GFX11-TRUE16-NEXT: .LBB79_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s27, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s27, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s26, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s26, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s25, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s25, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s24, 0xffff0000
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v8 :: v_dual_add_nc_u32 v7, v7, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s24, 16
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v2, v7, v2 :: v_dual_add_nc_u32 v7, v8, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v9, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v3, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v1.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 16, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v44.h, v4.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s23, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v57.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s23, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s22, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s22, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s21, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v80.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s21, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s20, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v90.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s20, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v99.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s19, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v107.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v114.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s17, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v120.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s16, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v125, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v125.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v132.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v135.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v167
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v167
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v167, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v167.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v176
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v176
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v176, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v176.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v177
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v177
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v177, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v177.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v178
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v178
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v178, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v178.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v179
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v179
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v179, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v179.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v180
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v180
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v180, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v180.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v181
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v181
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v181.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v182
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v182
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v182.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v183
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v183
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v183, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v183.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v168
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v168
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v168, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v168.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v169
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v169
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v169, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v169.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v170
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v170
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v170, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v170.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v171
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v171
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v171, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v171.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v172
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v172
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v172, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v172.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v173
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v173
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v173, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v173.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v174
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v174
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v174, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v174.h, v0.l
+; GFX11-TRUE16-NEXT: .LBB79_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, v125 :: v_dual_mov_b32 v5, v120
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v114 :: v_dual_mov_b32 v7, v107
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, v99 :: v_dual_mov_b32 v9, v90
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, v57 :: v_dual_mov_b32 v13, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v30 :: v_dual_mov_b32 v17, v173
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v174 :: v_dual_mov_b32 v19, v171
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v172 :: v_dual_mov_b32 v21, v169
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v170 :: v_dual_mov_b32 v23, v183
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v168 :: v_dual_mov_b32 v25, v181
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0x6
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:280
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v135 :: v_dual_mov_b32 v1, v134
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, v132 :: v_dual_mov_b32 v3, v129
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, v80 :: v_dual_mov_b32 v11, v69
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v182 :: v_dual_mov_b32 v27, v179
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v180 :: v_dual_mov_b32 v29, v177
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v178 :: v_dual_mov_b32 v31, v167
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v30, v176
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB79_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166
+; GFX11-TRUE16-NEXT: s_branch .LBB79_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v64bf16_to_v16f64_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:288
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:284
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:280
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v43, s32 offset:276
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v44, s32 offset:272
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v45, s32 offset:268
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v46, s32 offset:264
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v47, s32 offset:260
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v56, s32 offset:256
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v57, s32 offset:252
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v58, s32 offset:248
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v59, s32 offset:244
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v60, s32 offset:240
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v61, s32 offset:236
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v62, s32 offset:232
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v63, s32 offset:228
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v72, s32 offset:224
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v73, s32 offset:220
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v74, s32 offset:216
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v75, s32 offset:212
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v76, s32 offset:208
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v77, s32 offset:204
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v78, s32 offset:200
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v79, s32 offset:196
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v88, s32 offset:192
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v89, s32 offset:188
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v90, s32 offset:184
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v91, s32 offset:180
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v92, s32 offset:176
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v93, s32 offset:172
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v94, s32 offset:168
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v95, s32 offset:164
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v104, s32 offset:160
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v105, s32 offset:156
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v106, s32 offset:152
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v107, s32 offset:148
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v108, s32 offset:144
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v109, s32 offset:140
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v110, s32 offset:136
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v111, s32 offset:132
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v120, s32 offset:128
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v121, s32 offset:124
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v122, s32 offset:120
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v123, s32 offset:116
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v124, s32 offset:112
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v125, s32 offset:108
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v126, s32 offset:104
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v127, s32 offset:100
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v136, s32 offset:96
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v137, s32 offset:92
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v138, s32 offset:88
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v139, s32 offset:84
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v140, s32 offset:80
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v141, s32 offset:76
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v142, s32 offset:72
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v143, s32 offset:68
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v152, s32 offset:64
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v153, s32 offset:60
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v154, s32 offset:56
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v155, s32 offset:52
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v156, s32 offset:48
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v157, s32 offset:44
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v158, s32 offset:40
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v159, s32 offset:36
+; GFX11-FAKE16-NEXT: s_clause 0x8
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v168, s32 offset:32
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v169, s32 offset:28
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v170, s32 offset:24
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v171, s32 offset:20
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v172, s32 offset:16
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v173, s32 offset:12
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v174, s32 offset:8
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v175, s32 offset:4
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v184, s32
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v178, v13 :: v_dual_mov_b32 v179, v12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v180, v11 :: v_dual_mov_b32 v181, v9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v182, v10 :: v_dual_mov_b32 v169, v7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v170, v8 :: v_dual_mov_b32 v177, v3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v176, v6 :: v_dual_mov_b32 v171, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v174, v5 :: v_dual_mov_b32 v173, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v184, v2 :: v_dual_mov_b32 v175, v1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v183, s28 :: v_dual_mov_b32 v172, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB79_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s0 :: v_dual_mov_b32 v37, s2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s1 :: v_dual_mov_b32 v41, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v46, s16 :: v_dual_mov_b32 v59, s18
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v52, s17 :: v_dual_mov_b32 v67, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v76, s20 :: v_dual_mov_b32 v97, s22
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v86, s21 :: v_dual_mov_b32 v109, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v122, s24 :: v_dual_mov_b32 v151, s26
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v136, s25 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB79_3
+; GFX11-FAKE16-NEXT: .LBB79_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s27, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s27, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s26, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s26, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v3, 16, 1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s25, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s25, 0xffff0000
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s24, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v51, 0xffff0000, v183
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v9, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v3, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v10, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v7, v9, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s24, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s23, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v6, v1, v7 :: v_dual_and_b32 v1, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v7, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s23, 16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v151, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s22, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v11, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v6, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s22, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v9, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v14, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s21, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v6, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v14, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v7
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v8, v8, v13 :: v_dual_add_nc_u32 v7, v9, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s21, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v9, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v16, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s20, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v11, v7, v14 :: v_dual_add_nc_u32 v10, v10, v13
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v12, v16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s20, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_bfe_u32 v18, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v10, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v18, v12
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v19, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v11, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s19, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v16, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v21, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v19
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v13, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v18, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, v21, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v13
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v14, v14, v20 :: v_dual_add_nc_u32 v13, v16, v18
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v17
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v20, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s18, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v18
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v22, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v16, v16, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v17, v20, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v14
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v22, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, v17, v20
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v18, v13, v21 :: v_dual_and_b32 v13, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, v19, v22
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s17, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v23, 0x400000, v22
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
+; GFX11-FAKE16-NEXT: v_bfe_u32 v24, v19, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v25, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v17, v21, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, v24, v19
+; GFX11-FAKE16-NEXT: v_bfe_u32 v22, v25, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v18, v18, v23 :: v_dual_and_b32 v17, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v23, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s16, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, v22, v25
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v22, 0x400000, v19
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v24, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v27, v23, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v21
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v26, 0x400000, v25
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v20, v22, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v22, v24, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v25, v27, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v20
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v21, v21, v26 :: v_dual_add_nc_u32 v20, v22, v24
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v22, 0x7fff, v25
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v23
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v26, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v24
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v28, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v22, v22, v25, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v23, v26, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v21
+; GFX11-FAKE16-NEXT: v_bfe_u32 v25, v28, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v24, v20, v27 :: v_dual_add_nc_u32 v23, v23, v26
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v22
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v26
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, v25, v28
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v25, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v29, 0x400000, v28
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, 0x7fff, v24
+; GFX11-FAKE16-NEXT: v_bfe_u32 v30, v25, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v31, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v26, v23, v27, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, v30, v25
+; GFX11-FAKE16-NEXT: v_bfe_u32 v28, v31, 16, 1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v24, v24, v29, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v29, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v26
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v27
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, v28, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v28, 0x400000, v25
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v30, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v33, v29, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, 0x7fff, v27
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v32, 0x400000, v31
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v26, v26, v28, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v28, v30, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v33, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v26
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v27, v27, v32 :: v_dual_add_nc_u32 v26, v28, v30
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v31, 0x400000, v29
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v32, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v26
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v33, 0x400000, v30
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v34, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v28, v28, v31, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v29, v32, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v35, 0x400000, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v31, v34, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v34
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v27
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v30, v26, v33, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v28
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, v29, v32
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v33, 16, v178
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v30
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v30, v31, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff0000, v178
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v28
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v33, 0x40c00000, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v109, v5, 16, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v30, 0x7fff, v30
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v31, 0x40c00000, v31
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v28, v35, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v33, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v29
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v31, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, v37, v33
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v30, v30, v36, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v36, 16, v179
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v35, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v37, 0x400000, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v179
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v38, 0x400000, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v32, v37, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v180
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v34, v38, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v34, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v33, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v38, 16, v180
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, v33, v35
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v36, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v30
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v178, v31, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v36, v37
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v33, v33, v48 :: v_dual_lshlrev_b32 v36, 16, v182
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, v35, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v182
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v179, v32, 16, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v30, 0xffff, v30
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v136, v2, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v33, v33, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v31, v31, v48 :: v_dual_add_nc_u32 v38, v38, v35
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, v37, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v181
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v38
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v38, 16, v181
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v32, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v33
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v36, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v180, v31, 16, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, v35, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v170
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v36, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_lshlrev_b32 v36, 16, v170
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v39, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v182, v31, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, v38, v35
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v39, v36
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v33, v33, v48, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v38, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v169
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v31, v38, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v39, 16, v169
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v181, v32, 16, v33
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff0000, v176
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v39
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v32, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v35, v37
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v35, 16, v176
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, v32, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v49, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v170, v33, 16, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v49, v35
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v32, v32, v48 :: v_dual_add_nc_u32 v33, v37, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v174
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v35, 0x40c00000, v37 :: v_dual_cndmask_b32 v34, v34, v36
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v36, 16, v174
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v36, 0x40c00000, v36 :: v_dual_cndmask_b32 v33, v33, v39
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v169, v31, 16, v32
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, v37, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v31, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v171
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v38, 16, v177
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v31, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v176, v33, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v37
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v34, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v32, 0x40c00000, v32 :: v_dual_lshlrev_b32 v37, 16, v171
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v33, v33, v34, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v34, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v32, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v50, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v31, v31, v34 :: v_dual_add_nc_u32 v36, v37, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v34, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v177
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v35
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v36, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v49, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v50, v38
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v50, 16, v184
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v34, v34, v48 :: v_dual_add_nc_u32 v35, v49, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff0000, v184
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v35, 0x7fff, v35
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v49, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v36, v36, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v50
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v38, 0x40c00000, v48 :: v_dual_cndmask_b32 v35, v35, v49
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v48, v37, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v39, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v174, v33, 16, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v171, v32, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v48, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff0000, v175
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v34, 16, v175
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v39, v39, v38
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v177, v35, 16, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v35, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v33, 0x40c00000, v33 :: v_dual_add_f32 v34, 0x40c00000, v34
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v39
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v33, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v39, v34, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v31, v35, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v173
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v48, 16, v173
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v49, 0x400000, v33
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_cndmask_b32 v32, v32, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v37, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, v39, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v37
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v38, v38, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v122, v3, 16, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v37, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v38
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v48
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v33, v36, v49 :: v_dual_lshlrev_b32 v48, 16, v183
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v36, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v49, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v48, 0x40c00000, v48
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v35, v37, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v172
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v39, 16, v172
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v36, v38
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v55, 0x400000, v48
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v39, 0x40c00000, v39
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v50, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v39, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v36, v36, v49, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v54, 0x400000, v39
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v50, 0x40c00000, v51 :: v_dual_add_nc_u32 v49, v50, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v51, v48, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v38, v38, v39
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v53, 0x400000, v37
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v49, 0x7fff, v49
+; GFX11-FAKE16-NEXT: v_bfe_u32 v52, v50, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v51, v51, v48
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v38, 0x7fff, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v52, v52, v50
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v38, v38, v54 :: v_dual_add_nc_u32 v51, 0x7fff, v51
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v39, 0x7fff, v52
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v52, 0x400000, v50
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v38
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v48, v51, v55, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v184, v32, 16, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v175, v33, 16, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v48
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v37, v49, v53, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v173, v35, 16, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v97, v8, 16, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v48
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v37
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v39, v39, v52, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v86, v9, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v76, v11, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v67, v14, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v172, v37, 16, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v39
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v59, v16, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v52, v18, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v46, v21, 16, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v41, v22, 16, v25
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v183, v39, 16, v48
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v37, v24, 16, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v34, v26, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v29, 16, v30
+; GFX11-FAKE16-NEXT: .LBB79_3: ; %end
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v41 :: v_dual_mov_b32 v4, v46
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v59 :: v_dual_mov_b32 v9, v86
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, v67 :: v_dual_mov_b32 v8, v76
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, v97 :: v_dual_mov_b32 v13, v136
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, v109 :: v_dual_mov_b32 v12, v122
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, v151 :: v_dual_mov_b32 v17, v172
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, v173 :: v_dual_mov_b32 v19, v175
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, v184 :: v_dual_mov_b32 v23, v174
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, v171 :: v_dual_mov_b32 v25, v169
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, v170 :: v_dual_mov_b32 v29, v180
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_load_b32 v184, off, s32
+; GFX11-FAKE16-NEXT: scratch_load_b32 v175, off, s32 offset:4
+; GFX11-FAKE16-NEXT: scratch_load_b32 v174, off, s32 offset:8
+; GFX11-FAKE16-NEXT: scratch_load_b32 v173, off, s32 offset:12
+; GFX11-FAKE16-NEXT: scratch_load_b32 v172, off, s32 offset:16
+; GFX11-FAKE16-NEXT: scratch_load_b32 v171, off, s32 offset:20
+; GFX11-FAKE16-NEXT: scratch_load_b32 v170, off, s32 offset:24
+; GFX11-FAKE16-NEXT: scratch_load_b32 v169, off, s32 offset:28
+; GFX11-FAKE16-NEXT: scratch_load_b32 v168, off, s32 offset:32
+; GFX11-FAKE16-NEXT: scratch_load_b32 v159, off, s32 offset:36
+; GFX11-FAKE16-NEXT: scratch_load_b32 v158, off, s32 offset:40
+; GFX11-FAKE16-NEXT: scratch_load_b32 v157, off, s32 offset:44
+; GFX11-FAKE16-NEXT: scratch_load_b32 v156, off, s32 offset:48
+; GFX11-FAKE16-NEXT: scratch_load_b32 v155, off, s32 offset:52
+; GFX11-FAKE16-NEXT: scratch_load_b32 v154, off, s32 offset:56
+; GFX11-FAKE16-NEXT: scratch_load_b32 v153, off, s32 offset:60
+; GFX11-FAKE16-NEXT: scratch_load_b32 v152, off, s32 offset:64
+; GFX11-FAKE16-NEXT: scratch_load_b32 v143, off, s32 offset:68
+; GFX11-FAKE16-NEXT: scratch_load_b32 v142, off, s32 offset:72
+; GFX11-FAKE16-NEXT: scratch_load_b32 v141, off, s32 offset:76
+; GFX11-FAKE16-NEXT: scratch_load_b32 v140, off, s32 offset:80
+; GFX11-FAKE16-NEXT: scratch_load_b32 v139, off, s32 offset:84
+; GFX11-FAKE16-NEXT: scratch_load_b32 v138, off, s32 offset:88
+; GFX11-FAKE16-NEXT: scratch_load_b32 v137, off, s32 offset:92
+; GFX11-FAKE16-NEXT: scratch_load_b32 v136, off, s32 offset:96
+; GFX11-FAKE16-NEXT: scratch_load_b32 v127, off, s32 offset:100
+; GFX11-FAKE16-NEXT: scratch_load_b32 v126, off, s32 offset:104
+; GFX11-FAKE16-NEXT: scratch_load_b32 v125, off, s32 offset:108
+; GFX11-FAKE16-NEXT: scratch_load_b32 v124, off, s32 offset:112
+; GFX11-FAKE16-NEXT: scratch_load_b32 v123, off, s32 offset:116
+; GFX11-FAKE16-NEXT: scratch_load_b32 v122, off, s32 offset:120
+; GFX11-FAKE16-NEXT: scratch_load_b32 v121, off, s32 offset:124
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_load_b32 v120, off, s32 offset:128
+; GFX11-FAKE16-NEXT: scratch_load_b32 v111, off, s32 offset:132
+; GFX11-FAKE16-NEXT: scratch_load_b32 v110, off, s32 offset:136
+; GFX11-FAKE16-NEXT: scratch_load_b32 v109, off, s32 offset:140
+; GFX11-FAKE16-NEXT: scratch_load_b32 v108, off, s32 offset:144
+; GFX11-FAKE16-NEXT: scratch_load_b32 v107, off, s32 offset:148
+; GFX11-FAKE16-NEXT: scratch_load_b32 v106, off, s32 offset:152
+; GFX11-FAKE16-NEXT: scratch_load_b32 v105, off, s32 offset:156
+; GFX11-FAKE16-NEXT: scratch_load_b32 v104, off, s32 offset:160
+; GFX11-FAKE16-NEXT: scratch_load_b32 v95, off, s32 offset:164
+; GFX11-FAKE16-NEXT: scratch_load_b32 v94, off, s32 offset:168
+; GFX11-FAKE16-NEXT: scratch_load_b32 v93, off, s32 offset:172
+; GFX11-FAKE16-NEXT: scratch_load_b32 v92, off, s32 offset:176
+; GFX11-FAKE16-NEXT: scratch_load_b32 v91, off, s32 offset:180
+; GFX11-FAKE16-NEXT: scratch_load_b32 v90, off, s32 offset:184
+; GFX11-FAKE16-NEXT: scratch_load_b32 v89, off, s32 offset:188
+; GFX11-FAKE16-NEXT: scratch_load_b32 v88, off, s32 offset:192
+; GFX11-FAKE16-NEXT: scratch_load_b32 v79, off, s32 offset:196
+; GFX11-FAKE16-NEXT: scratch_load_b32 v78, off, s32 offset:200
+; GFX11-FAKE16-NEXT: scratch_load_b32 v77, off, s32 offset:204
+; GFX11-FAKE16-NEXT: scratch_load_b32 v76, off, s32 offset:208
+; GFX11-FAKE16-NEXT: scratch_load_b32 v75, off, s32 offset:212
+; GFX11-FAKE16-NEXT: scratch_load_b32 v74, off, s32 offset:216
+; GFX11-FAKE16-NEXT: scratch_load_b32 v73, off, s32 offset:220
+; GFX11-FAKE16-NEXT: scratch_load_b32 v72, off, s32 offset:224
+; GFX11-FAKE16-NEXT: scratch_load_b32 v63, off, s32 offset:228
+; GFX11-FAKE16-NEXT: scratch_load_b32 v62, off, s32 offset:232
+; GFX11-FAKE16-NEXT: scratch_load_b32 v61, off, s32 offset:236
+; GFX11-FAKE16-NEXT: scratch_load_b32 v60, off, s32 offset:240
+; GFX11-FAKE16-NEXT: scratch_load_b32 v59, off, s32 offset:244
+; GFX11-FAKE16-NEXT: scratch_load_b32 v58, off, s32 offset:248
+; GFX11-FAKE16-NEXT: scratch_load_b32 v57, off, s32 offset:252
+; GFX11-FAKE16-NEXT: s_clause 0x8
+; GFX11-FAKE16-NEXT: scratch_load_b32 v56, off, s32 offset:256
+; GFX11-FAKE16-NEXT: scratch_load_b32 v47, off, s32 offset:260
+; GFX11-FAKE16-NEXT: scratch_load_b32 v46, off, s32 offset:264
+; GFX11-FAKE16-NEXT: scratch_load_b32 v45, off, s32 offset:268
+; GFX11-FAKE16-NEXT: scratch_load_b32 v44, off, s32 offset:272
+; GFX11-FAKE16-NEXT: scratch_load_b32 v43, off, s32 offset:276
+; GFX11-FAKE16-NEXT: scratch_load_b32 v42, off, s32 offset:280
+; GFX11-FAKE16-NEXT: scratch_load_b32 v41, off, s32 offset:284
+; GFX11-FAKE16-NEXT: scratch_load_b32 v40, off, s32 offset:288
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v32 :: v_dual_mov_b32 v1, v34
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v37 :: v_dual_mov_b32 v5, v52
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, v183 :: v_dual_mov_b32 v21, v177
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, v176 :: v_dual_mov_b32 v27, v181
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v28, v182
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, v179 :: v_dual_mov_b32 v31, v178
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB79_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168
+; GFX11-FAKE16-NEXT: s_branch .LBB79_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -154174,9 +158070,10 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s29, 8
; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v5, 0xffff, s5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_and_b32 v1, 0xff, v35
; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
@@ -154192,6 +158089,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v32
; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
@@ -154202,201 +158100,169 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s9, s10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v36
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v34
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v33
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v68
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v33
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v64
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v66
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v4, v67
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v65
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v0, 16, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v38
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v6, 16, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v3, 16, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v1, v66
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v37
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v70
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v34
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v65
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v36
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xff, v118
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v67
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v38
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v0, v68
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v1, v69
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v3.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v39
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v2, v70
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v50
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v71
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v48
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v69
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v82
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v7, v80
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v8, v81
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v9, 16, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v55
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v10, 16, v3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v84
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v52
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xff, v54
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v86
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v83
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v48
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v49
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v80
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v3, v82
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v55
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v81
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v2, v71
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v51
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v53
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v52
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v83
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v3, v86
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v1, v84
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xff, v96
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v85
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v10, v97
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v1, 16, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v11, v87
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v99
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v3, 16, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v103
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v114
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v98
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v0, 16, v12
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v96
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v85
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v54
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v98
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v87
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v99
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v2, v97
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v102
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v103
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v0, v101
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v100
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v113
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v101
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xff, v116
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v14, v128
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v114
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v1, v113
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v117
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v112
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v117
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v102
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v13, v130
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xff, v133
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v14, v132
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v0, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v116
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v128
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v16, v134
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v132
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v133
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v3, v130
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v0, v161
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v129
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v147
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v148
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xff, v118
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xff, v129
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v16, v161
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v2, 16, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v166
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v144
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v15, v134
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v18, v147
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v22, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v167
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v21, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v17, 16, v19
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v18, 16, v22
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v0, v166
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v144
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v167
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v151
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v149
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v20, 16, v21
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s8
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v180
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v177
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v0, v180
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v149
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v177
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v165
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v162
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v42
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v41
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v0, v42
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v162
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v41
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v178
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v115
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v45
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v44
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v0, v45
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v115
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v44
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v131
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v119
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v59
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v56
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, v0, v59
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v119
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v56
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v145
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v135
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v60
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v61
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v0, v60
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v135
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v61
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v150
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v146
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v63
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v62
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, v0, v63
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v146
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v62
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v163
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v160
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v73
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v72
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v0, v73
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v160
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v72
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v176
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v164
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v75
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v74
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, v0, v75
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v164
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v74
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v181
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v179
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v77
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v76
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v0, v77
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v179
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v76
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v183
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v182
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v78
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v79
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v0, v78
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v182
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v79
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v43
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v40
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v89
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v88
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v0, v89
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v40
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v88
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v47
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v46
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v91
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v90
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v30, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v0, v91
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v46
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v90
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v58
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v57
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v92
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v93
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v1, 16, v0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, v0, v92
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v57
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v93
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s5
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB89_3
; GFX11-TRUE16-NEXT: .LBB89_2: ; %cmp.true
@@ -154436,57 +158302,59 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s10
; GFX11-TRUE16-NEXT: s_or_b32 s0, s1, s0
; GFX11-TRUE16-NEXT: s_or_b32 s1, s3, s2
+; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s9, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s0, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s1, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s10, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(38)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v57
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(37)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v58
-; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
-; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v57
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(35)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v47
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s8, 0x300
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v46
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v92, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v46
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v93, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v92, v0
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v91, v2
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(33)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v43
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v27, 0x300, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v93, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v90, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v31, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v30, 0x300, v2
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v40
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v31, 0x300, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v28, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v43, 0x300, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v90, v3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(31)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v183
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v182
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v182, 0x300, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v89, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v30, 0x300, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(29)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v181
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v88, v0
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v181, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v29, 0x300, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v78, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v79, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v29, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v181, 0x300, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v179
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v182, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v28, 0x300, v1
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v179, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v77, v3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(27)
@@ -154495,7 +158363,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v164
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(25)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v163
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v163, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v27, 0x300, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v76, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
@@ -154506,18 +158374,18 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v74, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v73, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v26, 0x300, v1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(23)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v150
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v26, 0x300, v0
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v150, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v25, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v72, v3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v146
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(21)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v145
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v135
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v25, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v135, 0x300, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v63, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
@@ -154525,13 +158393,13 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(19)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v131
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v62, v0
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v131, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v24, 0x300, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v60, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v61, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v24, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v131, 0x300, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v119
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v135, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 0x300, v1
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v119, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v59, v3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(17)
@@ -154540,29 +158408,29 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v115
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(15)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v165
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v115, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 0x300, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v56, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v162
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v45, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v145, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v115, 0x300, v0
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v44, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v42, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x300, v1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(13)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v151
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x300, v0
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v145, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v41, v3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v149
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(11)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v148
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v144
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v144, 0x300, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v180, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
@@ -154576,8 +158444,8 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v133, 0x300, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v129
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v129, 0x300, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v144, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v129, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v161, v3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v118
@@ -154585,167 +158453,141 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v117
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(5)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v116
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v116, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 0x300, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v147, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v114
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v99
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v134, v1
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v114, 0x300, v0
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v132, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v130, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v103
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v98
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v54
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v53
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x300, v1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v103
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v103, 0x300, v0
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v98
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v128, v3
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v99
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v54
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v39
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v52
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v113, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v53
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v34, 3, v34
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v35, 3, v35
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v33, 3, v33
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x300, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v113, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v128, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v100
-; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v101, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v102, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x300, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v101, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v102, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v96
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v134, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v97, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v55
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v96
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v97, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v55
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v100
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xff, v33
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x300, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v87, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v51
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v86, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v51, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v85, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v84, v6
; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v52
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v50
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v50, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v83, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v48
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v49
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v39
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v39, 0x300, v4
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v87, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v82, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v38
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v81, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v71, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v80, v6
; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v51
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v86, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v85, v6
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v84, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v51, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v50
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v50, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v49
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v83, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v48
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v82, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v9
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v38
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v81, v5
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v38, 0x300, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v71, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v80, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v9
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v37
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v37, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v39, 0x300, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v70, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v36
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v34
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v34, 3, v35
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v35, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v69, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v34
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v112, v3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v68, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v34, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v67, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v66, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v33
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v33, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v32
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v32, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v65, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v8, 0xffff, s4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v37
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x300, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v70, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v36
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v64, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v10, 16, v36
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v7, 16, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v33
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v37
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v22
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v32, 3, v32
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v69, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v34
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xff, v35
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xff, v32
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v112, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v35, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v67, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v68, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v34, v66, v34
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v32, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v4, 16, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v51
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v38
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v34, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v39, 16, v33
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v50, 16, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v9, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v15, 16, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v13, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v3, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v16, 16, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v116
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v129
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v18
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v26, 16, v36
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v114, 16, v32
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v144, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v20, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v21, 16, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v115
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v135
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v131
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v23
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v27
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v145, 16, v32
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v119, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v24, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v25, 16, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v163
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v182
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v181
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v28
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v2, 16, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v36, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v65, v33
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v34
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v36.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v33, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v64, v32
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v3.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v33.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v32, 0x300, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v133, 16, v19
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v160, 16, v32
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v179, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v29, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v30, v30, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v31, 16, v36
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v103.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v114.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v18.h, v129.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v19.h, v133.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v20.h, v144.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v21.h, v145.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v115.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v119.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v24.h, v131.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v25.h, v135.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v26.h, v150.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v27.h, v160.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v28.h, v179.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v29.h, v181.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v30.h, v182.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v43.l
; GFX11-TRUE16-NEXT: .LBB89_3: ; %end
; GFX11-TRUE16-NEXT: s_clause 0x1e
; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:320
@@ -168348,1575 +172190,3138 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v64bf16_to_v128i8_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_or_saveexec_b32 s4, -1
-; GFX11-NEXT: s_clause 0x3
-; GFX11-NEXT: scratch_store_b32 off, v40, s32
-; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:4
-; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:8
-; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:12
-; GFX11-NEXT: s_mov_b32 exec_lo, s4
-; GFX11-NEXT: v_writelane_b32 v40, s30, 0
-; GFX11-NEXT: v_writelane_b32 v41, s96, 0
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
-; GFX11-NEXT: v_readfirstlane_b32 s72, v1
-; GFX11-NEXT: v_readfirstlane_b32 s73, v2
-; GFX11-NEXT: v_writelane_b32 v40, s31, 1
-; GFX11-NEXT: v_writelane_b32 v41, s97, 1
-; GFX11-NEXT: v_readfirstlane_b32 s62, v3
-; GFX11-NEXT: v_readfirstlane_b32 s63, v4
-; GFX11-NEXT: v_readfirstlane_b32 s60, v5
-; GFX11-NEXT: v_writelane_b32 v40, s34, 2
-; GFX11-NEXT: v_writelane_b32 v41, s98, 2
-; GFX11-NEXT: v_readfirstlane_b32 s61, v6
-; GFX11-NEXT: v_readfirstlane_b32 s58, v7
-; GFX11-NEXT: v_readfirstlane_b32 s59, v8
-; GFX11-NEXT: v_writelane_b32 v40, s35, 3
-; GFX11-NEXT: v_writelane_b32 v41, s99, 3
-; GFX11-NEXT: v_readfirstlane_b32 s56, v9
-; GFX11-NEXT: v_readfirstlane_b32 s57, v10
-; GFX11-NEXT: v_readfirstlane_b32 s46, v11
-; GFX11-NEXT: v_writelane_b32 v40, s36, 4
-; GFX11-NEXT: v_writelane_b32 v41, s100, 4
-; GFX11-NEXT: v_readfirstlane_b32 s47, v12
-; GFX11-NEXT: v_readfirstlane_b32 s44, v13
-; GFX11-NEXT: v_readfirstlane_b32 s45, v14
-; GFX11-NEXT: v_writelane_b32 v40, s37, 5
-; GFX11-NEXT: v_writelane_b32 v41, s101, 5
-; GFX11-NEXT: s_mov_b32 vcc_hi, 0
-; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
-; GFX11-NEXT: ; implicit-def: $vgpr43 : SGPR spill to VGPR lane
-; GFX11-NEXT: ; implicit-def: $vgpr42 : SGPR spill to VGPR lane
-; GFX11-NEXT: v_writelane_b32 v40, s38, 6
-; GFX11-NEXT: v_writelane_b32 v41, s102, 6
-; GFX11-NEXT: v_writelane_b32 v40, s39, 7
-; GFX11-NEXT: v_writelane_b32 v41, s103, 7
-; GFX11-NEXT: v_writelane_b32 v40, s48, 8
-; GFX11-NEXT: v_writelane_b32 v41, s104, 8
-; GFX11-NEXT: v_writelane_b32 v40, s49, 9
-; GFX11-NEXT: v_writelane_b32 v40, s50, 10
-; GFX11-NEXT: v_writelane_b32 v40, s51, 11
-; GFX11-NEXT: v_writelane_b32 v40, s52, 12
-; GFX11-NEXT: v_writelane_b32 v40, s53, 13
-; GFX11-NEXT: v_writelane_b32 v40, s54, 14
-; GFX11-NEXT: v_writelane_b32 v40, s55, 15
-; GFX11-NEXT: v_writelane_b32 v40, s64, 16
-; GFX11-NEXT: v_writelane_b32 v40, s65, 17
-; GFX11-NEXT: v_writelane_b32 v40, s66, 18
-; GFX11-NEXT: v_writelane_b32 v40, s67, 19
-; GFX11-NEXT: v_writelane_b32 v40, s68, 20
-; GFX11-NEXT: v_writelane_b32 v40, s69, 21
-; GFX11-NEXT: v_writelane_b32 v40, s70, 22
-; GFX11-NEXT: v_writelane_b32 v40, s71, 23
-; GFX11-NEXT: v_writelane_b32 v40, s80, 24
-; GFX11-NEXT: v_writelane_b32 v40, s81, 25
-; GFX11-NEXT: v_writelane_b32 v40, s82, 26
-; GFX11-NEXT: v_writelane_b32 v40, s83, 27
-; GFX11-NEXT: v_writelane_b32 v40, s84, 28
-; GFX11-NEXT: v_writelane_b32 v40, s85, 29
-; GFX11-NEXT: v_writelane_b32 v40, s86, 30
-; GFX11-NEXT: v_writelane_b32 v40, s87, 31
-; GFX11-NEXT: s_cbranch_scc0 .LBB91_3
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s4, s27, 24
-; GFX11-NEXT: s_lshr_b64 s[12:13], s[26:27], 24
-; GFX11-NEXT: v_writelane_b32 v43, s4, 15
-; GFX11-NEXT: s_lshr_b32 s4, s27, 16
-; GFX11-NEXT: s_lshr_b32 s99, s2, 16
-; GFX11-NEXT: s_lshr_b32 s100, s2, 8
-; GFX11-NEXT: s_lshr_b32 s101, s1, 24
-; GFX11-NEXT: v_writelane_b32 v43, s4, 14
-; GFX11-NEXT: s_lshr_b32 s4, s27, 8
-; GFX11-NEXT: s_lshr_b32 s11, s1, 16
-; GFX11-NEXT: s_lshr_b32 s102, s1, 8
-; GFX11-NEXT: s_lshr_b32 s103, s0, 16
-; GFX11-NEXT: v_writelane_b32 v43, s4, 16
-; GFX11-NEXT: s_lshr_b32 s4, s26, 16
-; GFX11-NEXT: s_lshr_b32 s104, s0, 8
-; GFX11-NEXT: s_lshr_b32 s85, s45, 24
-; GFX11-NEXT: s_lshr_b32 s10, s45, 16
-; GFX11-NEXT: v_writelane_b32 v43, s4, 17
-; GFX11-NEXT: s_lshr_b32 s4, s26, 8
-; GFX11-NEXT: s_lshr_b32 s5, s45, 8
-; GFX11-NEXT: s_lshr_b32 s87, s44, 16
-; GFX11-NEXT: s_lshr_b32 s86, s44, 8
-; GFX11-NEXT: v_writelane_b32 v43, s4, 18
-; GFX11-NEXT: s_lshr_b32 s4, s25, 24
-; GFX11-NEXT: s_lshr_b32 s81, s47, 24
-; GFX11-NEXT: s_lshr_b32 s98, s47, 16
-; GFX11-NEXT: s_lshr_b32 s84, s47, 8
-; GFX11-NEXT: v_writelane_b32 v43, s4, 19
-; GFX11-NEXT: s_lshr_b32 s4, s25, 16
-; GFX11-NEXT: s_lshr_b32 s48, s46, 8
-; GFX11-NEXT: s_lshr_b32 s70, s57, 24
-; GFX11-NEXT: s_lshr_b32 s97, s57, 16
-; GFX11-NEXT: v_writelane_b32 v43, s4, 13
-; GFX11-NEXT: s_lshr_b32 s4, s25, 8
-; GFX11-NEXT: s_lshr_b32 s80, s57, 8
-; GFX11-NEXT: s_lshr_b32 s83, s56, 16
-; GFX11-NEXT: s_lshr_b32 s82, s56, 8
-; GFX11-NEXT: v_writelane_b32 v43, s4, 20
-; GFX11-NEXT: s_lshr_b32 s4, s24, 16
-; GFX11-NEXT: s_lshr_b32 s66, s59, 24
-; GFX11-NEXT: s_lshr_b32 s9, s59, 16
-; GFX11-NEXT: s_lshr_b32 s69, s59, 8
-; GFX11-NEXT: v_writelane_b32 v43, s4, 21
-; GFX11-NEXT: s_lshr_b32 s4, s24, 8
-; GFX11-NEXT: s_lshr_b32 s71, s58, 16
-; GFX11-NEXT: s_lshr_b32 s39, s58, 8
-; GFX11-NEXT: s_lshr_b32 s55, s61, 24
-; GFX11-NEXT: v_writelane_b32 v43, s4, 22
-; GFX11-NEXT: s_lshr_b32 s4, s23, 24
-; GFX11-NEXT: s_lshr_b32 s8, s61, 16
-; GFX11-NEXT: s_lshr_b32 s65, s61, 8
-; GFX11-NEXT: s_lshr_b32 s68, s60, 16
-; GFX11-NEXT: v_writelane_b32 v43, s4, 23
-; GFX11-NEXT: s_lshr_b32 s4, s23, 16
-; GFX11-NEXT: s_lshr_b32 s67, s60, 8
-; GFX11-NEXT: s_lshr_b32 s51, s63, 24
-; GFX11-NEXT: s_lshr_b32 s96, s63, 16
-; GFX11-NEXT: v_writelane_b32 v43, s4, 12
-; GFX11-NEXT: s_lshr_b32 s4, s23, 8
-; GFX11-NEXT: s_lshr_b32 s54, s63, 8
-; GFX11-NEXT: s_lshr_b32 s38, s62, 16
-; GFX11-NEXT: s_lshr_b32 s64, s62, 8
-; GFX11-NEXT: v_writelane_b32 v43, s4, 24
-; GFX11-NEXT: s_lshr_b32 s4, s22, 16
-; GFX11-NEXT: s_lshr_b32 s36, s73, 24
-; GFX11-NEXT: s_lshr_b32 s7, s73, 16
-; GFX11-NEXT: s_lshr_b32 s50, s73, 8
-; GFX11-NEXT: v_writelane_b32 v43, s4, 25
-; GFX11-NEXT: s_lshr_b32 s4, s22, 8
-; GFX11-NEXT: s_lshr_b32 s53, s72, 16
-; GFX11-NEXT: s_lshr_b32 s52, s72, 8
-; GFX11-NEXT: s_lshr_b32 s34, s29, 24
-; GFX11-NEXT: v_writelane_b32 v43, s4, 26
-; GFX11-NEXT: s_lshr_b32 s4, s21, 24
-; GFX11-NEXT: s_lshr_b32 s6, s29, 16
-; GFX11-NEXT: s_lshr_b32 s35, s29, 8
-; GFX11-NEXT: s_lshr_b32 s37, s28, 16
-; GFX11-NEXT: v_writelane_b32 v43, s4, 27
-; GFX11-NEXT: s_lshr_b32 s4, s21, 16
-; GFX11-NEXT: s_lshr_b32 s49, s28, 8
-; GFX11-NEXT: s_lshr_b64 s[14:15], s[16:17], 24
-; GFX11-NEXT: s_lshr_b64 s[40:41], s[2:3], 24
-; GFX11-NEXT: v_writelane_b32 v43, s4, 11
-; GFX11-NEXT: s_lshr_b32 s4, s21, 8
-; GFX11-NEXT: s_lshr_b64 s[42:43], s[0:1], 24
-; GFX11-NEXT: s_lshr_b64 s[74:75], s[44:45], 24
-; GFX11-NEXT: s_lshr_b64 s[76:77], s[46:47], 24
-; GFX11-NEXT: v_writelane_b32 v43, s4, 28
-; GFX11-NEXT: s_lshr_b32 s4, s20, 16
-; GFX11-NEXT: s_lshr_b64 s[78:79], s[56:57], 24
-; GFX11-NEXT: s_lshr_b64 s[88:89], s[58:59], 24
-; GFX11-NEXT: s_lshr_b64 s[90:91], s[60:61], 24
-; GFX11-NEXT: v_writelane_b32 v43, s4, 29
-; GFX11-NEXT: s_lshr_b32 s4, s20, 8
-; GFX11-NEXT: s_lshr_b64 s[92:93], s[62:63], 24
-; GFX11-NEXT: s_lshr_b64 s[94:95], s[72:73], 24
-; GFX11-NEXT: s_lshr_b64 s[30:31], s[28:29], 24
-; GFX11-NEXT: v_writelane_b32 v43, s4, 30
-; GFX11-NEXT: s_lshr_b32 s4, s19, 24
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v43, s4, 31
-; GFX11-NEXT: s_lshr_b32 s4, s19, 16
-; GFX11-NEXT: v_writelane_b32 v43, s4, 10
-; GFX11-NEXT: s_lshr_b32 s4, s19, 8
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v42, s4, 0
-; GFX11-NEXT: s_lshr_b32 s4, s18, 16
-; GFX11-NEXT: v_writelane_b32 v42, s4, 1
-; GFX11-NEXT: s_lshr_b32 s4, s18, 8
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v42, s4, 2
-; GFX11-NEXT: s_lshr_b32 s4, s17, 24
-; GFX11-NEXT: v_writelane_b32 v42, s4, 3
-; GFX11-NEXT: s_lshr_b32 s4, s17, 16
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v43, s4, 9
-; GFX11-NEXT: s_lshr_b32 s4, s17, 8
-; GFX11-NEXT: v_writelane_b32 v42, s4, 4
-; GFX11-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v42, s4, 5
-; GFX11-NEXT: s_lshr_b32 s4, s16, 8
-; GFX11-NEXT: v_writelane_b32 v42, s4, 6
-; GFX11-NEXT: s_lshr_b32 s4, s3, 24
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v42, s4, 7
-; GFX11-NEXT: s_lshr_b32 s4, s3, 16
-; GFX11-NEXT: v_writelane_b32 v43, s4, 8
-; GFX11-NEXT: s_lshr_b32 s4, s3, 8
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v42, s4, 8
-; GFX11-NEXT: s_lshr_b32 s4, s46, 16
-; GFX11-NEXT: v_writelane_b32 v43, s12, 6
-; GFX11-NEXT: v_writelane_b32 v43, s13, 7
-; GFX11-NEXT: s_lshr_b64 s[12:13], s[24:25], 24
-; GFX11-NEXT: v_writelane_b32 v43, s12, 4
-; GFX11-NEXT: v_writelane_b32 v43, s13, 5
-; GFX11-NEXT: s_lshr_b64 s[12:13], s[22:23], 24
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v43, s12, 2
-; GFX11-NEXT: v_writelane_b32 v43, s13, 3
-; GFX11-NEXT: s_lshr_b64 s[12:13], s[20:21], 24
-; GFX11-NEXT: v_writelane_b32 v43, s12, 0
-; GFX11-NEXT: v_writelane_b32 v43, s13, 1
-; GFX11-NEXT: s_lshr_b64 s[12:13], s[18:19], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, vcc_hi
-; GFX11-NEXT: s_cbranch_vccnz .LBB91_4
-; GFX11-NEXT: .LBB91_2: ; %cmp.true
-; GFX11-NEXT: s_and_b32 s4, s29, 0xffff0000
-; GFX11-NEXT: s_and_b32 s14, s47, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
-; GFX11-NEXT: s_and_b32 s4, s1, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s15, s47, 16
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s6, s29, 16
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s6
-; GFX11-NEXT: s_and_b32 s8, s45, 0xffff0000
-; GFX11-NEXT: v_readfirstlane_b32 s47, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: s_lshl_b32 s7, s45, 16
-; GFX11-NEXT: s_and_b32 s78, s28, 0xffff0000
-; GFX11-NEXT: s_bfe_u32 s6, s47, 0x10010
-; GFX11-NEXT: s_lshl_b32 s79, s28, 16
-; GFX11-NEXT: s_add_i32 s45, s6, s47
-; GFX11-NEXT: s_and_b32 s5, s73, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s77, s73, 16
-; GFX11-NEXT: s_and_b32 s75, s72, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s76, s72, 16
-; GFX11-NEXT: s_and_b32 s11, s63, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s74, s63, 16
-; GFX11-NEXT: s_and_b32 s72, s62, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s73, s62, 16
-; GFX11-NEXT: s_and_b32 s63, s61, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s62, s61, 16
-; GFX11-NEXT: s_and_b32 s61, s60, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s60, s60, 16
-; GFX11-NEXT: s_and_b32 s41, s59, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s40, s59, 16
-; GFX11-NEXT: s_and_b32 s28, s58, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s29, s58, 16
-; GFX11-NEXT: s_and_b32 s13, s57, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s10, s57, 16
-; GFX11-NEXT: s_and_b32 s42, s56, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s43, s56, 16
-; GFX11-NEXT: s_and_b32 s12, s46, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s9, s46, 16
-; GFX11-NEXT: s_and_b32 s4, s44, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s6, s44, 16
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_bitset1_b32 s47, 22
-; GFX11-NEXT: v_bfe_u32 v4, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: s_and_b32 s44, vcc_lo, exec_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_cselect_b32 s44, s47, s45
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v2
-; GFX11-NEXT: s_lshr_b32 s58, s44, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s1
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s78
-; GFX11-NEXT: v_readfirstlane_b32 s1, v3
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s79
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: s_bfe_u32 s45, s1, 0x10010
-; GFX11-NEXT: v_bfe_u32 v4, v6, 16, 1
-; GFX11-NEXT: s_add_i32 s45, s45, s1
-; GFX11-NEXT: s_bitset1_b32 s1, 22
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_and_b32 s44, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s1, s1, s45
-; GFX11-NEXT: s_and_b32 s44, s0, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v2
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s44
-; GFX11-NEXT: v_bfe_u32 v5, v7, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v4, v6
-; GFX11-NEXT: s_lshr_b32 s1, s1, 16
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v7
-; GFX11-NEXT: v_readfirstlane_b32 s44, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: s_bfe_u32 s45, s44, 0x10010
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_add_i32 s45, s45, s44
-; GFX11-NEXT: s_bitset1_b32 s44, 22
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_and_b32 s46, vcc_lo, exec_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v5, v7
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v6
-; GFX11-NEXT: s_cselect_b32 s44, s44, s45
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v21
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s5
-; GFX11-NEXT: v_readfirstlane_b32 s0, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s77
-; GFX11-NEXT: s_bfe_u32 s5, s0, 0x10010
-; GFX11-NEXT: v_lshl_or_b32 v7, v22, 16, v4
-; GFX11-NEXT: s_add_i32 s45, s5, s0
-; GFX11-NEXT: s_lshr_b32 s5, s44, 16
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_bitset1_b32 s0, 22
-; GFX11-NEXT: s_and_b32 s44, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s0, s0, s45
-; GFX11-NEXT: s_and_b32 s44, s3, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v1
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s44
-; GFX11-NEXT: v_bfe_u32 v6, v8, 16, 1
-; GFX11-NEXT: v_bfe_u32 v1, v5, 16, 1
-; GFX11-NEXT: s_lshr_b32 s0, s0, 16
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v23
-; GFX11-NEXT: v_readfirstlane_b32 s44, v9
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v6, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v5
-; GFX11-NEXT: v_lshl_or_b32 v6, v2, 16, v3
-; GFX11-NEXT: s_bfe_u32 s45, s44, 0x10010
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v5
-; GFX11-NEXT: s_add_i32 s45, s45, s44
-; GFX11-NEXT: s_bitset1_b32 s44, 22
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_and_b32 s46, vcc_lo, exec_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: s_cselect_b32 s44, s44, s45
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s3
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s76
-; GFX11-NEXT: s_lshr_b32 s59, s44, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s75
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_readfirstlane_b32 s3, v10
-; GFX11-NEXT: v_bfe_u32 v8, v9, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v87, 24, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v96, 16, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v2, v4, 16, 1
-; GFX11-NEXT: s_bfe_u32 s45, s3, 0x10010
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_add_i32 s45, s45, s3
-; GFX11-NEXT: s_bitset1_b32 s3, 22
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v4
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_and_b32 s44, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s3, s3, s45
-; GFX11-NEXT: s_and_b32 s44, s2, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v1
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s44
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v8, v9
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v9
-; GFX11-NEXT: v_readfirstlane_b32 s44, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_lshr_b32 s3, s3, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v4
-; GFX11-NEXT: s_bfe_u32 s45, s44, 0x10010
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v24
-; GFX11-NEXT: s_add_i32 s45, s45, s44
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_bitset1_b32 s44, 22
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s74
-; GFX11-NEXT: v_lshl_or_b32 v14, v25, 16, v5
-; GFX11-NEXT: s_and_b32 s46, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s44, s44, s45
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s2
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v85, 24, v14
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_readfirstlane_b32 s2, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v3
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s11
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: s_bfe_u32 s11, s2, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_add_i32 s45, s11, s2
-; GFX11-NEXT: s_lshr_b32 s11, s44, 16
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_bitset1_b32 s2, 22
-; GFX11-NEXT: s_and_b32 s44, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s2, s2, s45
-; GFX11-NEXT: s_and_b32 s44, s17, 0xffff0000
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v26
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s44
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_bfe_u32 v10, v3, 16, 1
-; GFX11-NEXT: s_lshr_b32 s2, s2, 16
-; GFX11-NEXT: v_lshl_or_b32 v13, v2, 16, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v1
-; GFX11-NEXT: v_readfirstlane_b32 s44, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v3
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v86, 16, v13
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo
-; GFX11-NEXT: s_bfe_u32 s45, s44, 0x10010
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_add_i32 s45, s45, s44
-; GFX11-NEXT: s_bitset1_b32 s44, 22
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
-; GFX11-NEXT: s_and_b32 s46, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s44, s44, s45
-; GFX11-NEXT: s_lshl_b32 s17, s17, 16
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s73
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s17
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v1
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s72
-; GFX11-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX11-NEXT: v_readfirstlane_b32 s17, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v8, v9, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_lshr_b32 s72, s44, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v2
-; GFX11-NEXT: s_bfe_u32 s45, s17, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v27
-; GFX11-NEXT: s_add_i32 s45, s45, s17
-; GFX11-NEXT: s_bitset1_b32 s17, 22
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_and_b32 s44, vcc_lo, exec_lo
-; GFX11-NEXT: v_lshl_or_b32 v16, v28, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_bfe_u32 v8, v1, 16, 1
-; GFX11-NEXT: s_cselect_b32 s17, s17, s45
-; GFX11-NEXT: s_and_b32 s44, s16, 0xffff0000
-; GFX11-NEXT: s_lshr_b32 s17, s17, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s63
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 24, v16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v2
-; GFX11-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v5, v3
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v29
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v8, v1
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s44
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_readfirstlane_b32 s44, v8
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: s_bfe_u32 s45, s44, 0x10010
-; GFX11-NEXT: s_add_i32 s45, s45, s44
-; GFX11-NEXT: s_bitset1_b32 s44, 22
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_and_b32 s46, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s44, s44, s45
-; GFX11-NEXT: s_lshl_b32 s16, s16, 16
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s16
-; GFX11-NEXT: s_lshr_b32 s46, s44, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_readfirstlane_b32 s16, v8
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v3
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s62
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_bfe_u32 s45, s16, 0x10010
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_add_i32 s45, s45, s16
-; GFX11-NEXT: s_bitset1_b32 s16, 22
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_and_b32 s44, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s16, s16, s45
-; GFX11-NEXT: s_and_b32 s44, s19, 0xffff0000
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s44
-; GFX11-NEXT: v_lshl_or_b32 v15, v1, 16, v5
-; GFX11-NEXT: v_bfe_u32 v9, v4, 16, 1
-; GFX11-NEXT: s_lshr_b32 s16, s16, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v2, v8, vcc_lo
-; GFX11-NEXT: v_readfirstlane_b32 s44, v10
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v9, v4
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s60
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v4
-; GFX11-NEXT: s_bfe_u32 s45, s44, 0x10010
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s61
-; GFX11-NEXT: s_add_i32 s45, s45, s44
-; GFX11-NEXT: s_bitset1_b32 s44, 22
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s44, s44, s45
-; GFX11-NEXT: s_lshl_b32 s19, s19, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v9
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s19
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_bfe_u32 v9, v8, 16, 1
-; GFX11-NEXT: s_lshr_b32 s60, s44, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v1
-; GFX11-NEXT: v_readfirstlane_b32 s19, v10
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_bfe_u32 v3, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v9, v8
-; GFX11-NEXT: s_bfe_u32 s45, s19, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v2
-; GFX11-NEXT: s_add_i32 s45, s45, s19
-; GFX11-NEXT: s_bitset1_b32 s19, 22
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_and_b32 s44, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s19, s19, s45
-; GFX11-NEXT: s_and_b32 s44, s18, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v3, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s44
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: s_lshr_b32 s19, s19, 16
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s29
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s41
-; GFX11-NEXT: v_readfirstlane_b32 s41, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_pack_ll_b32_b16 s47, s17, s72
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v2
-; GFX11-NEXT: v_bfe_u32 v2, v3, 16, 1
-; GFX11-NEXT: s_bfe_u32 s44, s41, 0x10010
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_add_i32 s44, s44, s41
-; GFX11-NEXT: s_bitset1_b32 s41, 22
-; GFX11-NEXT: s_addk_i32 s44, 0x7fff
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s40
-; GFX11-NEXT: s_and_b32 s45, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s41, s41, s44
-; GFX11-NEXT: s_lshl_b32 s18, s18, 16
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v31
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s18
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v3
-; GFX11-NEXT: v_lshl_or_b32 v18, v30, 16, v4
-; GFX11-NEXT: v_readfirstlane_b32 s18, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_lshl_or_b32 v17, v1, 16, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v3
-; GFX11-NEXT: s_bfe_u32 s40, s18, 0x10010
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s28
-; GFX11-NEXT: s_add_i32 s44, s40, s18
-; GFX11-NEXT: s_lshr_b32 s40, s41, 16
-; GFX11-NEXT: s_addk_i32 s44, 0x7fff
-; GFX11-NEXT: s_bitset1_b32 s18, 22
-; GFX11-NEXT: s_and_b32 s41, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s18, s18, s44
-; GFX11-NEXT: s_and_b32 s41, s21, 0xffff0000
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s41
-; GFX11-NEXT: v_bfe_u32 v2, v9, 16, 1
-; GFX11-NEXT: s_lshr_b32 s18, s18, 16
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v9
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
-; GFX11-NEXT: v_readfirstlane_b32 s28, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v9
-; GFX11-NEXT: v_bfe_u32 v4, v8, 16, 1
-; GFX11-NEXT: v_bfe_u32 v5, v10, 16, 1
-; GFX11-NEXT: s_bfe_u32 s29, s28, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v1
-; GFX11-NEXT: s_add_i32 s29, s29, s28
-; GFX11-NEXT: s_bitset1_b32 s28, 22
-; GFX11-NEXT: s_addk_i32 s29, 0x7fff
-; GFX11-NEXT: s_and_b32 s41, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s28, s28, s29
-; GFX11-NEXT: s_lshl_b32 s21, s21, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s21
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_lshr_b32 s61, s28, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v5, v10
-; GFX11-NEXT: s_pack_ll_b32_b16 s44, s2, s11
-; GFX11-NEXT: v_readfirstlane_b32 s21, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v4, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: s_bfe_u32 s29, s21, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v2
-; GFX11-NEXT: s_add_i32 s29, s29, s21
-; GFX11-NEXT: s_bitset1_b32 s21, 22
-; GFX11-NEXT: s_addk_i32 s29, 0x7fff
-; GFX11-NEXT: s_and_b32 s28, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s21, s21, s29
-; GFX11-NEXT: s_and_b32 s28, s20, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s28
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v10
-; GFX11-NEXT: s_lshr_b32 s21, s21, 16
-; GFX11-NEXT: s_pack_ll_b32_b16 s45, s3, s59
-; GFX11-NEXT: s_pack_ll_b32_b16 s46, s16, s46
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s13
-; GFX11-NEXT: v_readfirstlane_b32 s13, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 24, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_bfe_u32 s28, s13, 0x10010
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v34
-; GFX11-NEXT: s_add_i32 s28, s28, s13
-; GFX11-NEXT: s_bitset1_b32 s13, 22
-; GFX11-NEXT: s_addk_i32 s28, 0x7fff
-; GFX11-NEXT: s_and_b32 s29, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s13, s13, s28
-; GFX11-NEXT: s_lshl_b32 s20, s20, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v1
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s20
-; GFX11-NEXT: v_bfe_u32 v1, v3, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s10
-; GFX11-NEXT: v_lshl_or_b32 v20, v33, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v35
-; GFX11-NEXT: v_readfirstlane_b32 s20, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v19, v2, 16, v9
-; GFX11-NEXT: s_bfe_u32 s10, s20, 0x10010
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: s_add_i32 s28, s10, s20
-; GFX11-NEXT: s_lshr_b32 s10, s13, 16
-; GFX11-NEXT: s_addk_i32 s28, 0x7fff
-; GFX11-NEXT: s_bitset1_b32 s20, 22
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v3
-; GFX11-NEXT: s_and_b32 s13, vcc_lo, exec_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: s_cselect_b32 s13, s20, s28
-; GFX11-NEXT: s_and_b32 s20, s23, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s42
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s20
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v5
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s43
-; GFX11-NEXT: v_readfirstlane_b32 s28, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v19
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: s_bfe_u32 s20, s28, 0x10010
-; GFX11-NEXT: v_bfe_u32 v4, v8, 16, 1
-; GFX11-NEXT: s_add_i32 s29, s20, s28
-; GFX11-NEXT: s_lshr_b32 s20, s13, 16
-; GFX11-NEXT: s_addk_i32 s29, 0x7fff
-; GFX11-NEXT: s_bitset1_b32 s28, 22
-; GFX11-NEXT: s_and_b32 s13, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s13, s28, s29
-; GFX11-NEXT: s_lshl_b32 s23, s23, 16
-; GFX11-NEXT: v_bfe_u32 v5, v9, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s23
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v4, v8
-; GFX11-NEXT: s_lshr_b32 s62, s13, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v5, v9
-; GFX11-NEXT: v_readfirstlane_b32 s23, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: s_bfe_u32 s28, s23, 0x10010
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v9
-; GFX11-NEXT: s_add_i32 s28, s28, s23
-; GFX11-NEXT: s_bitset1_b32 s23, 22
-; GFX11-NEXT: s_addk_i32 s28, 0x7fff
-; GFX11-NEXT: s_and_b32 s13, vcc_lo, exec_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: s_cselect_b32 s13, s23, s28
-; GFX11-NEXT: s_and_b32 s23, s22, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s15
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v36
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s23
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s14
-; GFX11-NEXT: s_lshr_b32 s23, s13, 16
-; GFX11-NEXT: v_bfe_u32 v9, v8, 16, 1
-; GFX11-NEXT: v_readfirstlane_b32 s14, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v10, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_lshl_or_b32 v71, v37, 16, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s12
-; GFX11-NEXT: s_bfe_u32 s15, s14, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: s_add_i32 s15, s15, s14
-; GFX11-NEXT: s_bitset1_b32 s14, 22
-; GFX11-NEXT: s_addk_i32 s15, 0x7fff
-; GFX11-NEXT: s_and_b32 s13, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s13, s14, s15
-; GFX11-NEXT: s_lshl_b32 s14, s22, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s14
-; GFX11-NEXT: v_bfe_u32 v1, v5, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v38
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v9, v8
-; GFX11-NEXT: s_lshr_b32 s13, s13, 16
-; GFX11-NEXT: v_readfirstlane_b32 s14, v10
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v5
-; GFX11-NEXT: v_lshl_or_b32 v70, v2, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v9
-; GFX11-NEXT: s_bfe_u32 s12, s14, 0x10010
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v8
-; GFX11-NEXT: s_add_i32 s12, s12, s14
-; GFX11-NEXT: s_bitset1_b32 s14, 22
-; GFX11-NEXT: s_addk_i32 s12, 0x7fff
-; GFX11-NEXT: s_and_b32 s15, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s12, s14, s12
-; GFX11-NEXT: s_and_b32 s14, s25, 0xffff0000
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s14
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s9
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
-; GFX11-NEXT: v_readfirstlane_b32 s9, v10
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_lshr_b32 s22, s12, 16
-; GFX11-NEXT: v_bfe_u32 v3, v4, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v2
-; GFX11-NEXT: s_bfe_u32 s14, s9, 0x10010
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_add_i32 s14, s14, s9
-; GFX11-NEXT: s_bitset1_b32 s9, 22
-; GFX11-NEXT: s_addk_i32 s14, 0x7fff
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v1
-; GFX11-NEXT: s_and_b32 s12, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s9, s9, s14
-; GFX11-NEXT: s_lshl_b32 s12, s25, 16
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s8
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s12
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v3, v4
-; GFX11-NEXT: s_lshr_b32 s63, s9, 16
-; GFX11-NEXT: v_bfe_u32 v3, v8, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v4
-; GFX11-NEXT: v_readfirstlane_b32 s8, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v8
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v8
-; GFX11-NEXT: s_bfe_u32 s12, s8, 0x10010
-; GFX11-NEXT: v_bfe_u32 v12, v9, 16, 1
-; GFX11-NEXT: s_add_i32 s12, s12, s8
-; GFX11-NEXT: s_bitset1_b32 s8, 22
-; GFX11-NEXT: s_addk_i32 s12, 0x7fff
-; GFX11-NEXT: s_and_b32 s9, vcc_lo, exec_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_cselect_b32 s8, s8, s12
-; GFX11-NEXT: s_and_b32 s9, s24, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: s_lshr_b32 s25, s8, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v2, v10, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s9
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s7
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v12, v9
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s6
-; GFX11-NEXT: v_readfirstlane_b32 s7, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v11, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v9
-; GFX11-NEXT: s_pack_ll_b32_b16 s28, s0, s5
-; GFX11-NEXT: s_bfe_u32 s9, s7, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v3
-; GFX11-NEXT: s_add_i32 s9, s9, s7
-; GFX11-NEXT: s_bitset1_b32 s7, 22
-; GFX11-NEXT: s_addk_i32 s9, 0x7fff
-; GFX11-NEXT: s_and_b32 s8, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s7, s7, s9
-; GFX11-NEXT: s_lshl_b32 s8, s24, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s8
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v4, v8, 16, 1
-; GFX11-NEXT: s_lshr_b32 s12, s7, 16
-; GFX11-NEXT: v_readfirstlane_b32 s8, v10
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v8
-; GFX11-NEXT: v_bfe_u32 v10, v12, 16, 1
-; GFX11-NEXT: s_bfe_u32 s4, s8, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v2
-; GFX11-NEXT: s_add_i32 s4, s4, s8
-; GFX11-NEXT: s_bitset1_b32 s8, 22
-; GFX11-NEXT: s_addk_i32 s4, 0x7fff
-; GFX11-NEXT: s_and_b32 s6, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s4, s8, s4
-; GFX11-NEXT: s_and_b32 s6, s27, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
-; GFX11-NEXT: v_add_f32_e64 v52, 0x40c00000, s6
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v12
-; GFX11-NEXT: s_lshr_b32 s24, s4, 16
-; GFX11-NEXT: v_readfirstlane_b32 s6, v52
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v9
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v52, v52
-; GFX11-NEXT: v_bfe_u32 v4, v9, 16, 1
-; GFX11-NEXT: s_bfe_u32 s7, s6, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_add_i32 s7, s7, s6
-; GFX11-NEXT: s_bitset1_b32 s6, 22
-; GFX11-NEXT: s_addk_i32 s7, 0x7fff
-; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s4, s6, s7
-; GFX11-NEXT: s_lshl_b32 s6, s27, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v4, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v12
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: s_lshr_b32 s73, s4, 16
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v49
-; GFX11-NEXT: v_readfirstlane_b32 s6, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v51
-; GFX11-NEXT: v_lshl_or_b32 v66, v1, 16, v11
-; GFX11-NEXT: s_bfe_u32 s7, s6, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: s_add_i32 s7, s7, s6
-; GFX11-NEXT: s_bitset1_b32 s6, 22
-; GFX11-NEXT: s_addk_i32 s7, 0x7fff
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s4, s6, s7
-; GFX11-NEXT: s_and_b32 s6, s26, 0xffff0000
-; GFX11-NEXT: s_lshr_b32 s27, s4, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s6
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v52
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v39
-; GFX11-NEXT: v_lshl_or_b32 v55, v50, 16, v4
-; GFX11-NEXT: s_pack_ll_b32_b16 s8, s22, s13
-; GFX11-NEXT: v_readfirstlane_b32 s6, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_lshl_or_b32 v54, v2, 16, v8
-; GFX11-NEXT: v_lshl_or_b32 v67, v48, 16, v5
-; GFX11-NEXT: v_lshrrev_b64 v[8:9], 24, v[17:18]
-; GFX11-NEXT: s_bfe_u32 s5, s6, 0x10010
-; GFX11-NEXT: v_lshrrev_b64 v[9:10], 24, v[15:16]
-; GFX11-NEXT: s_add_i32 s5, s5, s6
-; GFX11-NEXT: s_bitset1_b32 s6, 22
-; GFX11-NEXT: s_addk_i32 s5, 0x7fff
-; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s14, s6, s5
-; GFX11-NEXT: s_lshl_b32 s4, s26, 16
-; GFX11-NEXT: s_pack_ll_b32_b16 s6, s20, s10
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
-; GFX11-NEXT: s_lshr_b32 s13, s14, 16
-; GFX11-NEXT: v_lshrrev_b64 v[10:11], 24, v[13:14]
-; GFX11-NEXT: v_lshrrev_b64 v[11:12], 24, v[6:7]
-; GFX11-NEXT: s_pack_ll_b32_b16 s29, s1, s58
-; GFX11-NEXT: v_readfirstlane_b32 s11, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_lshrrev_b64 v[1:2], 24, v[54:55]
-; GFX11-NEXT: v_lshrrev_b64 v[2:3], 24, v[66:67]
-; GFX11-NEXT: v_lshrrev_b64 v[3:4], 24, v[70:71]
-; GFX11-NEXT: s_bfe_u32 s10, s11, 0x10010
-; GFX11-NEXT: v_lshrrev_b64 v[4:5], 24, v[19:20]
-; GFX11-NEXT: s_add_i32 s10, s10, s11
-; GFX11-NEXT: s_bitset1_b32 s11, 22
-; GFX11-NEXT: s_addk_i32 s10, 0x7fff
-; GFX11-NEXT: s_and_b32 s14, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s10, s11, s10
-; GFX11-NEXT: s_pack_ll_b32_b16 s5, s19, s60
-; GFX11-NEXT: s_lshr_b32 s26, s10, 16
-; GFX11-NEXT: s_pack_ll_b32_b16 s4, s18, s40
-; GFX11-NEXT: s_pack_ll_b32_b16 s9, s23, s62
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 24, v55
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 8, v55
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v54
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 8, v54
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 24, v67
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 8, v67
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v66
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 8, v66
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 24, v71
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 8, v71
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v70
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 8, v70
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 24, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 8, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 8, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 8, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v17, 8, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 8, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v84, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v15, 8, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 8, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 8, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 8, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 8, v6
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s21, s61
-; GFX11-NEXT: s_pack_ll_b32_b16 s11, s25, s63
-; GFX11-NEXT: s_pack_ll_b32_b16 s57, s27, s73
-; GFX11-NEXT: s_pack_ll_b32_b16 s56, s26, s13
-; GFX11-NEXT: s_pack_ll_b32_b16 s10, s24, s12
-; GFX11-NEXT: s_lshr_b64 s[94:95], s[8:9], 24
-; GFX11-NEXT: s_lshr_b64 s[12:13], s[4:5], 24
-; GFX11-NEXT: s_lshr_b64 s[14:15], s[46:47], 24
-; GFX11-NEXT: s_lshr_b64 s[40:41], s[44:45], 24
-; GFX11-NEXT: s_lshr_b64 s[42:43], s[28:29], 24
-; GFX11-NEXT: s_lshr_b64 vcc, s[56:57], 24
-; GFX11-NEXT: s_lshr_b64 s[34:35], s[10:11], 24
-; GFX11-NEXT: s_lshr_b64 s[30:31], s[6:7], 24
-; GFX11-NEXT: s_lshr_b32 s13, s57, 24
-; GFX11-NEXT: s_lshr_b32 s15, s57, 8
-; GFX11-NEXT: s_lshr_b32 s41, s56, 16
-; GFX11-NEXT: s_lshr_b32 s43, s56, 8
-; GFX11-NEXT: s_lshr_b32 s56, s11, 24
-; GFX11-NEXT: s_lshr_b32 s11, s11, 8
-; GFX11-NEXT: s_lshr_b32 s57, s10, 16
-; GFX11-NEXT: s_lshr_b32 s10, s10, 8
-; GFX11-NEXT: s_lshr_b32 s74, s9, 24
-; GFX11-NEXT: s_lshr_b32 s9, s9, 8
-; GFX11-NEXT: s_lshr_b32 s75, s8, 16
-; GFX11-NEXT: s_lshr_b32 s8, s8, 8
-; GFX11-NEXT: s_lshr_b32 s76, s7, 24
-; GFX11-NEXT: s_lshr_b32 s77, s7, 8
-; GFX11-NEXT: s_lshr_b32 s78, s6, 16
-; GFX11-NEXT: s_lshr_b32 s79, s6, 8
-; GFX11-NEXT: s_lshr_b32 s88, s5, 24
-; GFX11-NEXT: s_lshr_b32 s89, s5, 8
-; GFX11-NEXT: s_lshr_b32 s90, s4, 16
-; GFX11-NEXT: s_lshr_b32 s91, s4, 8
-; GFX11-NEXT: s_lshr_b32 s92, s47, 24
-; GFX11-NEXT: s_lshr_b32 s47, s47, 8
-; GFX11-NEXT: s_lshr_b32 s93, s46, 16
-; GFX11-NEXT: s_lshr_b32 s46, s46, 8
-; GFX11-NEXT: s_lshr_b32 s95, s45, 24
-; GFX11-NEXT: s_lshr_b32 s45, s45, 8
-; GFX11-NEXT: s_lshr_b32 s99, s44, 16
-; GFX11-NEXT: s_lshr_b32 s100, s44, 8
-; GFX11-NEXT: s_lshr_b32 s101, s29, 24
-; GFX11-NEXT: s_lshr_b32 s102, s29, 8
-; GFX11-NEXT: s_lshr_b32 s103, s28, 16
-; GFX11-NEXT: s_lshr_b32 s104, s28, 8
-; GFX11-NEXT: s_branch .LBB91_5
-; GFX11-NEXT: .LBB91_3:
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr104
-; GFX11-NEXT: ; implicit-def: $sgpr103
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr102
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr101
-; GFX11-NEXT: ; implicit-def: $sgpr100
-; GFX11-NEXT: ; implicit-def: $sgpr99
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr49
-; GFX11-NEXT: ; implicit-def: $sgpr37
-; GFX11-NEXT: ; implicit-def: $sgpr35
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr34
-; GFX11-NEXT: ; implicit-def: $sgpr52
-; GFX11-NEXT: ; implicit-def: $sgpr53
-; GFX11-NEXT: ; implicit-def: $sgpr50
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr36
-; GFX11-NEXT: ; implicit-def: $sgpr64
-; GFX11-NEXT: ; implicit-def: $sgpr38
-; GFX11-NEXT: ; implicit-def: $sgpr54
-; GFX11-NEXT: ; implicit-def: $sgpr96
-; GFX11-NEXT: ; implicit-def: $sgpr51
-; GFX11-NEXT: ; implicit-def: $sgpr67
-; GFX11-NEXT: ; implicit-def: $sgpr68
-; GFX11-NEXT: ; implicit-def: $sgpr65
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr55
-; GFX11-NEXT: ; implicit-def: $sgpr39
-; GFX11-NEXT: ; implicit-def: $sgpr71
-; GFX11-NEXT: ; implicit-def: $sgpr69
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr66
-; GFX11-NEXT: ; implicit-def: $sgpr82
-; GFX11-NEXT: ; implicit-def: $sgpr83
-; GFX11-NEXT: ; implicit-def: $sgpr80
-; GFX11-NEXT: ; implicit-def: $sgpr97
-; GFX11-NEXT: ; implicit-def: $sgpr70
-; GFX11-NEXT: ; implicit-def: $sgpr48
-; GFX11-NEXT: ; implicit-def: $sgpr84
-; GFX11-NEXT: ; implicit-def: $sgpr98
-; GFX11-NEXT: ; implicit-def: $sgpr81
-; GFX11-NEXT: ; implicit-def: $sgpr86
-; GFX11-NEXT: ; implicit-def: $sgpr87
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr85
-; GFX11-NEXT: ; implicit-def: $sgpr30
-; GFX11-NEXT: ; implicit-def: $sgpr94
-; GFX11-NEXT: ; implicit-def: $sgpr92
-; GFX11-NEXT: ; implicit-def: $sgpr90
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: v_writelane_b32 v43, s4, 0
-; GFX11-NEXT: v_writelane_b32 v43, s5, 1
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: v_writelane_b32 v43, s4, 2
-; GFX11-NEXT: v_writelane_b32 v43, s5, 3
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: v_writelane_b32 v43, s74, 4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: v_writelane_b32 v43, s75, 5
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: v_writelane_b32 v43, s74, 6
-; GFX11-NEXT: v_writelane_b32 v43, s75, 7
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: s_branch .LBB91_2
-; GFX11-NEXT: .LBB91_4:
-; GFX11-NEXT: v_dual_mov_b32 v10, s94 :: v_dual_mov_b32 v11, s30
-; GFX11-NEXT: v_readlane_b32 s94, v43, 2
-; GFX11-NEXT: v_dual_mov_b32 v96, s37 :: v_dual_mov_b32 v87, s34
-; GFX11-NEXT: v_dual_mov_b32 v6, s49 :: v_dual_mov_b32 v7, s35
-; GFX11-NEXT: v_readlane_b32 s95, v43, 3
-; GFX11-NEXT: v_readlane_b32 vcc_lo, v43, 6
-; GFX11-NEXT: v_readlane_b32 s30, v43, 0
-; GFX11-NEXT: v_readlane_b32 s34, v43, 4
-; GFX11-NEXT: v_dual_mov_b32 v52, s44 :: v_dual_mov_b32 v51, s45
-; GFX11-NEXT: v_dual_mov_b32 v50, s10 :: v_dual_mov_b32 v49, s46
-; GFX11-NEXT: v_dual_mov_b32 v39, s47 :: v_dual_mov_b32 v48, s98
-; GFX11-NEXT: v_dual_mov_b32 v38, s56 :: v_dual_mov_b32 v37, s97
-; GFX11-NEXT: v_dual_mov_b32 v36, s57 :: v_dual_mov_b32 v35, s58
-; GFX11-NEXT: v_dual_mov_b32 v34, s59 :: v_dual_mov_b32 v33, s9
-; GFX11-NEXT: v_dual_mov_b32 v32, s60 :: v_dual_mov_b32 v31, s61
-; GFX11-NEXT: v_dual_mov_b32 v30, s8 :: v_dual_mov_b32 v29, s62
-; GFX11-NEXT: v_dual_mov_b32 v27, s63 :: v_dual_mov_b32 v28, s96
-; GFX11-NEXT: v_dual_mov_b32 v26, s72 :: v_dual_mov_b32 v25, s7
-; GFX11-NEXT: v_dual_mov_b32 v24, s73 :: v_dual_mov_b32 v23, s28
-; GFX11-NEXT: v_dual_mov_b32 v21, s29 :: v_dual_mov_b32 v22, s6
-; GFX11-NEXT: v_dual_mov_b32 v53, s87 :: v_dual_mov_b32 v54, s86
-; GFX11-NEXT: v_dual_mov_b32 v5, s85 :: v_dual_mov_b32 v12, s5
-; GFX11-NEXT: v_dual_mov_b32 v65, s4 :: v_dual_mov_b32 v66, s48
-; GFX11-NEXT: v_dual_mov_b32 v55, s81 :: v_dual_mov_b32 v64, s84
-; GFX11-NEXT: v_dual_mov_b32 v69, s83 :: v_dual_mov_b32 v70, s82
-; GFX11-NEXT: v_dual_mov_b32 v67, s70 :: v_dual_mov_b32 v68, s80
-; GFX11-NEXT: v_dual_mov_b32 v80, s71 :: v_dual_mov_b32 v19, s39
-; GFX11-NEXT: v_dual_mov_b32 v71, s66 :: v_dual_mov_b32 v20, s69
-; GFX11-NEXT: v_dual_mov_b32 v82, s68 :: v_dual_mov_b32 v17, s67
-; GFX11-NEXT: v_dual_mov_b32 v81, s55 :: v_dual_mov_b32 v18, s65
-; GFX11-NEXT: v_dual_mov_b32 v84, s38 :: v_dual_mov_b32 v15, s64
-; GFX11-NEXT: v_dual_mov_b32 v83, s51 :: v_dual_mov_b32 v16, s54
-; GFX11-NEXT: v_dual_mov_b32 v86, s53 :: v_dual_mov_b32 v13, s52
-; GFX11-NEXT: v_dual_mov_b32 v85, s36 :: v_dual_mov_b32 v14, s50
-; GFX11-NEXT: v_dual_mov_b32 v1, s74 :: v_dual_mov_b32 v2, s76
-; GFX11-NEXT: v_dual_mov_b32 v3, s78 :: v_dual_mov_b32 v4, s88
-; GFX11-NEXT: v_dual_mov_b32 v8, s90 :: v_dual_mov_b32 v9, s92
-; GFX11-NEXT: s_mov_b32 s58, s11
-; GFX11-NEXT: v_readlane_b32 s59, v43, 8
-; GFX11-NEXT: v_readlane_b32 s72, v43, 9
-; GFX11-NEXT: v_readlane_b32 s60, v43, 10
-; GFX11-NEXT: v_readlane_b32 s61, v43, 11
-; GFX11-NEXT: v_readlane_b32 s62, v43, 12
-; GFX11-NEXT: v_readlane_b32 s63, v43, 13
-; GFX11-NEXT: v_readlane_b32 s73, v43, 14
-; GFX11-NEXT: v_readlane_b32 s13, v43, 15
-; GFX11-NEXT: v_readlane_b32 s15, v43, 16
-; GFX11-NEXT: v_readlane_b32 s41, v43, 17
-; GFX11-NEXT: v_readlane_b32 s43, v43, 18
-; GFX11-NEXT: v_readlane_b32 s56, v43, 19
-; GFX11-NEXT: v_readlane_b32 s11, v43, 20
-; GFX11-NEXT: v_readlane_b32 s57, v43, 21
-; GFX11-NEXT: v_readlane_b32 s10, v43, 22
-; GFX11-NEXT: v_readlane_b32 s74, v43, 23
-; GFX11-NEXT: v_readlane_b32 s9, v43, 24
-; GFX11-NEXT: v_readlane_b32 s75, v43, 25
-; GFX11-NEXT: v_readlane_b32 s8, v43, 26
-; GFX11-NEXT: v_readlane_b32 s76, v43, 27
-; GFX11-NEXT: v_readlane_b32 s77, v43, 28
-; GFX11-NEXT: v_readlane_b32 s78, v43, 29
-; GFX11-NEXT: v_readlane_b32 s79, v43, 30
-; GFX11-NEXT: v_readlane_b32 s88, v43, 31
-; GFX11-NEXT: v_readlane_b32 s89, v42, 0
-; GFX11-NEXT: v_readlane_b32 s90, v42, 1
-; GFX11-NEXT: v_readlane_b32 s91, v42, 2
-; GFX11-NEXT: v_readlane_b32 s92, v42, 3
-; GFX11-NEXT: v_readlane_b32 s47, v42, 4
-; GFX11-NEXT: v_readlane_b32 s93, v42, 5
-; GFX11-NEXT: v_readlane_b32 vcc_hi, v43, 7
-; GFX11-NEXT: v_readlane_b32 s46, v42, 6
-; GFX11-NEXT: v_readlane_b32 s31, v43, 1
-; GFX11-NEXT: v_readlane_b32 s95, v42, 7
-; GFX11-NEXT: v_readlane_b32 s45, v42, 8
-; GFX11-NEXT: v_readlane_b32 s35, v43, 5
-; GFX11-NEXT: .LBB91_5: ; %end
-; GFX11-NEXT: s_and_b32 s0, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s4, s104, 8
-; GFX11-NEXT: s_and_b32 s5, s103, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s42, 8
-; GFX11-NEXT: s_or_b32 s0, s0, s4
-; GFX11-NEXT: s_or_b32 s4, s5, s6
-; GFX11-NEXT: s_and_b32 s1, s1, 0xff
-; GFX11-NEXT: s_lshl_b32 s5, s102, 8
-; GFX11-NEXT: s_and_b32 s6, s58, 0xff
-; GFX11-NEXT: s_lshl_b32 s7, s101, 8
-; GFX11-NEXT: s_or_b32 s1, s1, s5
-; GFX11-NEXT: s_or_b32 s5, s6, s7
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
-; GFX11-NEXT: s_lshl_b32 s4, s4, 16
-; GFX11-NEXT: s_and_b32 s1, s1, 0xffff
-; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: s_or_b32 s0, s0, s4
-; GFX11-NEXT: s_or_b32 s1, s1, s5
-; GFX11-NEXT: s_and_b32 s2, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s4, s100, 8
-; GFX11-NEXT: s_and_b32 s5, s99, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s40, 8
-; GFX11-NEXT: s_or_b32 s2, s2, s4
-; GFX11-NEXT: s_or_b32 s4, s5, s6
-; GFX11-NEXT: s_and_b32 s3, s3, 0xff
-; GFX11-NEXT: s_lshl_b32 s5, s45, 8
-; GFX11-NEXT: s_and_b32 s6, s59, 0xff
-; GFX11-NEXT: s_lshl_b32 s7, s95, 8
-; GFX11-NEXT: s_or_b32 s3, s3, s5
-; GFX11-NEXT: s_or_b32 s5, s6, s7
-; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
-; GFX11-NEXT: s_lshl_b32 s4, s4, 16
-; GFX11-NEXT: s_and_b32 s3, s3, 0xffff
-; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: s_or_b32 s2, s2, s4
-; GFX11-NEXT: s_or_b32 s3, s3, s5
-; GFX11-NEXT: v_dual_mov_b32 v97, s0 :: v_dual_mov_b32 v98, s1
-; GFX11-NEXT: v_dual_mov_b32 v99, s2 :: v_dual_mov_b32 v100, s3
-; GFX11-NEXT: s_and_b32 s0, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s46, 8
-; GFX11-NEXT: s_and_b32 s2, s93, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s14, 8
-; GFX11-NEXT: s_or_b32 s0, s0, s1
-; GFX11-NEXT: s_or_b32 s1, s2, s3
-; GFX11-NEXT: s_and_b32 s2, s17, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s47, 8
-; GFX11-NEXT: s_and_b32 s4, s72, 0xff
-; GFX11-NEXT: s_lshl_b32 s5, s92, 8
-; GFX11-NEXT: s_or_b32 s2, s2, s3
-; GFX11-NEXT: s_or_b32 s3, s4, s5
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: s_or_b32 s0, s0, s1
-; GFX11-NEXT: s_or_b32 s1, s2, s3
-; GFX11-NEXT: s_and_b32 s2, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s91, 8
-; GFX11-NEXT: s_and_b32 s4, s90, 0xff
-; GFX11-NEXT: s_lshl_b32 s5, s12, 8
-; GFX11-NEXT: s_or_b32 s2, s2, s3
-; GFX11-NEXT: s_or_b32 s3, s4, s5
-; GFX11-NEXT: s_and_b32 s4, s19, 0xff
-; GFX11-NEXT: s_lshl_b32 s5, s89, 8
-; GFX11-NEXT: s_and_b32 s6, s60, 0xff
-; GFX11-NEXT: s_lshl_b32 s7, s88, 8
-; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s5, s6, s7
-; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
-; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: s_or_b32 s2, s2, s3
-; GFX11-NEXT: s_or_b32 s3, s4, s5
-; GFX11-NEXT: v_dual_mov_b32 v112, s0 :: v_dual_mov_b32 v113, s1
-; GFX11-NEXT: v_dual_mov_b32 v114, s2 :: v_dual_mov_b32 v115, s3
-; GFX11-NEXT: s_and_b32 s0, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s79, 8
-; GFX11-NEXT: s_and_b32 s2, s78, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s30, 8
-; GFX11-NEXT: s_or_b32 s0, s0, s1
-; GFX11-NEXT: s_or_b32 s1, s2, s3
-; GFX11-NEXT: s_and_b32 s2, s21, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s77, 8
-; GFX11-NEXT: s_and_b32 s4, s61, 0xff
-; GFX11-NEXT: s_lshl_b32 s5, s76, 8
-; GFX11-NEXT: s_or_b32 s2, s2, s3
-; GFX11-NEXT: s_or_b32 s3, s4, s5
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: s_or_b32 s0, s0, s1
-; GFX11-NEXT: s_or_b32 s1, s2, s3
-; GFX11-NEXT: s_and_b32 s2, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s8, 8
-; GFX11-NEXT: s_and_b32 s4, s75, 0xff
-; GFX11-NEXT: s_lshl_b32 s5, s94, 8
-; GFX11-NEXT: s_or_b32 s2, s2, s3
-; GFX11-NEXT: s_or_b32 s3, s4, s5
-; GFX11-NEXT: s_and_b32 s4, s23, 0xff
-; GFX11-NEXT: s_lshl_b32 s5, s9, 8
-; GFX11-NEXT: s_and_b32 s6, s62, 0xff
-; GFX11-NEXT: s_lshl_b32 s7, s74, 8
-; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s5, s6, s7
-; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
-; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: s_or_b32 s2, s2, s3
-; GFX11-NEXT: s_clause 0x1
-; GFX11-NEXT: scratch_store_b128 v0, v[97:100], off
-; GFX11-NEXT: scratch_store_b128 v0, v[112:115], off offset:16
-; GFX11-NEXT: s_or_b32 s3, s4, s5
-; GFX11-NEXT: v_dual_mov_b32 v97, s0 :: v_dual_mov_b32 v98, s1
-; GFX11-NEXT: v_dual_mov_b32 v99, s2 :: v_dual_mov_b32 v100, s3
-; GFX11-NEXT: s_and_b32 s0, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s10, 8
-; GFX11-NEXT: s_and_b32 s2, s57, 0xff
-; GFX11-NEXT: s_lshl_b32 s4, s34, 8
-; GFX11-NEXT: s_or_b32 s0, s0, s1
-; GFX11-NEXT: s_or_b32 s1, s2, s4
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: s_lshl_b32 s2, s11, 8
-; GFX11-NEXT: s_or_b32 s0, s0, s1
-; GFX11-NEXT: s_and_b32 s1, s25, 0xff
-; GFX11-NEXT: s_and_b32 s3, s63, 0xff
-; GFX11-NEXT: s_lshl_b32 s4, s56, 8
-; GFX11-NEXT: s_or_b32 s1, s1, s2
-; GFX11-NEXT: s_or_b32 s2, s3, s4
-; GFX11-NEXT: s_and_b32 s1, s1, 0xffff
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: s_and_b32 s3, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s4, s43, 8
-; GFX11-NEXT: s_or_b32 s1, s1, s2
-; GFX11-NEXT: s_or_b32 s2, s3, s4
-; GFX11-NEXT: s_and_b32 s3, s41, 0xff
-; GFX11-NEXT: s_lshl_b32 s4, vcc_lo, 8
-; GFX11-NEXT: s_lshl_b32 s5, s15, 8
-; GFX11-NEXT: s_or_b32 s3, s3, s4
-; GFX11-NEXT: s_and_b32 s4, s27, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s13, 8
-; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_and_b32 s5, s73, 0xff
-; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
-; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: v_dual_mov_b32 v112, s0 :: v_dual_and_b32 v23, 0xff, v23
-; GFX11-NEXT: v_dual_mov_b32 v113, s1 :: v_dual_lshlrev_b32 v6, 8, v6
-; GFX11-NEXT: s_or_b32 s2, s2, s3
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v114, s2 :: v_dual_lshlrev_b32 v11, 8, v11
-; GFX11-NEXT: s_or_b32 s3, s4, s5
-; GFX11-NEXT: v_dual_mov_b32 v115, s3 :: v_dual_and_b32 v96, 0xff, v96
-; GFX11-NEXT: v_or_b32_e32 v6, v23, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v7, 8, v7
-; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v13
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_or_b32_e32 v11, v96, v11
-; GFX11-NEXT: v_lshlrev_b32_e32 v10, 8, v10
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-NEXT: v_and_b32_e32 v24, 0xff, v24
-; GFX11-NEXT: v_lshlrev_b32_e32 v14, 8, v14
-; GFX11-NEXT: v_lshlrev_b32_e32 v11, 16, v11
-; GFX11-NEXT: v_lshlrev_b32_e32 v15, 8, v15
-; GFX11-NEXT: v_lshlrev_b32_e32 v9, 8, v9
-; GFX11-NEXT: v_lshlrev_b32_e32 v16, 8, v16
-; GFX11-NEXT: v_lshlrev_b32_e32 v8, 8, v8
-; GFX11-NEXT: v_or_b32_e32 v23, v6, v11
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v21
-; GFX11-NEXT: v_and_b32_e32 v11, 0xff, v22
-; GFX11-NEXT: v_lshlrev_b32_e32 v21, 8, v87
-; GFX11-NEXT: v_and_b32_e32 v22, 0xff, v26
-; GFX11-NEXT: v_and_b32_e32 v26, 0xff, v86
-; GFX11-NEXT: v_or_b32_e32 v6, v6, v7
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v4
-; GFX11-NEXT: v_or_b32_e32 v7, v11, v21
-; GFX11-NEXT: v_or_b32_e32 v11, v22, v13
-; GFX11-NEXT: v_or_b32_e32 v10, v26, v10
-; GFX11-NEXT: v_or_b32_e32 v13, v24, v14
-; GFX11-NEXT: v_and_b32_e32 v14, 0xff, v25
-; GFX11-NEXT: v_lshlrev_b32_e32 v21, 8, v85
-; GFX11-NEXT: v_and_b32_e32 v22, 0xff, v29
-; GFX11-NEXT: v_and_b32_e32 v24, 0xff, v84
-; GFX11-NEXT: v_and_b32_e32 v25, 0xff, v27
-; GFX11-NEXT: v_and_b32_e32 v26, 0xff, v28
-; GFX11-NEXT: v_lshlrev_b32_e32 v27, 8, v83
-; GFX11-NEXT: v_or_b32_e32 v14, v14, v21
-; GFX11-NEXT: v_or_b32_e32 v15, v22, v15
-; GFX11-NEXT: v_or_b32_e32 v9, v24, v9
-; GFX11-NEXT: v_or_b32_e32 v16, v25, v16
-; GFX11-NEXT: v_or_b32_e32 v21, v26, v27
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v7, 16, v7
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_lshlrev_b32_e32 v10, 16, v10
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_lshlrev_b32_e32 v14, 16, v14
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_lshlrev_b32_e32 v9, 16, v9
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_lshlrev_b32_e32 v21, 16, v21
-; GFX11-NEXT: v_or_b32_e32 v24, v6, v7
-; GFX11-NEXT: v_or_b32_e32 v25, v11, v10
-; GFX11-NEXT: v_or_b32_e32 v26, v13, v14
-; GFX11-NEXT: v_or_b32_e32 v6, v15, v9
-; GFX11-NEXT: v_or_b32_e32 v7, v16, v21
-; GFX11-NEXT: v_and_b32_e32 v9, 0xff, v32
-; GFX11-NEXT: v_lshlrev_b32_e32 v10, 8, v17
-; GFX11-NEXT: v_and_b32_e32 v11, 0xff, v82
-; GFX11-NEXT: v_and_b32_e32 v13, 0xff, v31
-; GFX11-NEXT: v_lshlrev_b32_e32 v14, 8, v18
-; GFX11-NEXT: v_and_b32_e32 v15, 0xff, v30
-; GFX11-NEXT: v_lshlrev_b32_e32 v16, 8, v81
-; GFX11-NEXT: v_and_b32_e32 v17, 0xff, v35
-; GFX11-NEXT: v_lshlrev_b32_e32 v18, 8, v19
-; GFX11-NEXT: v_or_b32_e32 v9, v9, v10
-; GFX11-NEXT: v_or_b32_e32 v8, v11, v8
-; GFX11-NEXT: v_or_b32_e32 v10, v13, v14
-; GFX11-NEXT: v_or_b32_e32 v11, v15, v16
-; GFX11-NEXT: v_or_b32_e32 v13, v17, v18
-; GFX11-NEXT: v_and_b32_e32 v14, 0xff, v80
-; GFX11-NEXT: v_and_b32_e32 v15, 0xff, v34
-; GFX11-NEXT: v_lshlrev_b32_e32 v16, 8, v20
-; GFX11-NEXT: v_and_b32_e32 v17, 0xff, v33
-; GFX11-NEXT: v_lshlrev_b32_e32 v18, 8, v71
-; GFX11-NEXT: v_and_b32_e32 v19, 0xff, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v20, 8, v70
-; GFX11-NEXT: v_and_b32_e32 v21, 0xff, v69
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 8, v3
-; GFX11-NEXT: v_or_b32_e32 v4, v14, v4
-; GFX11-NEXT: v_or_b32_e32 v14, v15, v16
-; GFX11-NEXT: v_or_b32_e32 v15, v17, v18
-; GFX11-NEXT: v_or_b32_e32 v16, v19, v20
-; GFX11-NEXT: v_or_b32_e32 v3, v21, v3
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_lshlrev_b32_e32 v8, 16, v8
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_lshlrev_b32_e32 v11, 16, v11
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_lshlrev_b32_e32 v15, 16, v15
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_or_b32_e32 v8, v9, v8
-; GFX11-NEXT: v_or_b32_e32 v9, v10, v11
-; GFX11-NEXT: v_or_b32_e32 v13, v13, v4
-; GFX11-NEXT: v_or_b32_e32 v14, v14, v15
-; GFX11-NEXT: v_or_b32_e32 v15, v16, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v36
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v68
-; GFX11-NEXT: v_and_b32_e32 v10, 0xff, v37
-; GFX11-NEXT: v_lshlrev_b32_e32 v11, 8, v67
-; GFX11-NEXT: v_and_b32_e32 v16, 0xff, v49
-; GFX11-NEXT: v_lshlrev_b32_e32 v17, 8, v66
-; GFX11-NEXT: v_and_b32_e32 v18, 0xff, v65
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 8, v2
-; GFX11-NEXT: v_and_b32_e32 v19, 0xff, v39
-; GFX11-NEXT: v_lshlrev_b32_e32 v20, 8, v64
-; GFX11-NEXT: v_or_b32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v4, v10, v11
-; GFX11-NEXT: v_or_b32_e32 v10, v16, v17
-; GFX11-NEXT: v_or_b32_e32 v2, v18, v2
-; GFX11-NEXT: v_or_b32_e32 v11, v19, v20
-; GFX11-NEXT: v_and_b32_e32 v16, 0xff, v48
-; GFX11-NEXT: v_lshlrev_b32_e32 v17, 8, v55
-; GFX11-NEXT: v_and_b32_e32 v18, 0xff, v52
-; GFX11-NEXT: v_lshlrev_b32_e32 v19, 8, v54
-; GFX11-NEXT: v_and_b32_e32 v20, 0xff, v53
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; GFX11-NEXT: v_and_b32_e32 v21, 0xff, v51
-; GFX11-NEXT: v_lshlrev_b32_e32 v12, 8, v12
-; GFX11-NEXT: v_and_b32_e32 v22, 0xff, v50
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 8, v5
-; GFX11-NEXT: v_or_b32_e32 v16, v16, v17
-; GFX11-NEXT: v_or_b32_e32 v17, v18, v19
-; GFX11-NEXT: v_or_b32_e32 v1, v20, v1
-; GFX11-NEXT: v_or_b32_e32 v12, v21, v12
-; GFX11-NEXT: v_or_b32_e32 v5, v22, v5
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_lshlrev_b32_e32 v18, 16, v16
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_lshlrev_b32_e32 v19, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_or_b32_e32 v16, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v1, v10, v2
-; GFX11-NEXT: v_or_b32_e32 v2, v11, v18
-; GFX11-NEXT: v_or_b32_e32 v3, v17, v19
-; GFX11-NEXT: v_or_b32_e32 v4, v12, v5
-; GFX11-NEXT: s_clause 0x5
-; GFX11-NEXT: scratch_store_b128 v0, v[97:100], off offset:32
-; GFX11-NEXT: scratch_store_b128 v0, v[112:115], off offset:48
-; GFX11-NEXT: scratch_store_b128 v0, v[23:26], off offset:64
-; GFX11-NEXT: scratch_store_b128 v0, v[6:9], off offset:80
-; GFX11-NEXT: scratch_store_b128 v0, v[13:16], off offset:96
-; GFX11-NEXT: scratch_store_b128 v0, v[1:4], off offset:112
-; GFX11-NEXT: v_readlane_b32 s104, v41, 8
-; GFX11-NEXT: v_readlane_b32 s103, v41, 7
-; GFX11-NEXT: v_readlane_b32 s102, v41, 6
-; GFX11-NEXT: v_readlane_b32 s101, v41, 5
-; GFX11-NEXT: v_readlane_b32 s100, v41, 4
-; GFX11-NEXT: v_readlane_b32 s99, v41, 3
-; GFX11-NEXT: v_readlane_b32 s98, v41, 2
-; GFX11-NEXT: v_readlane_b32 s97, v41, 1
-; GFX11-NEXT: v_readlane_b32 s96, v41, 0
-; GFX11-NEXT: v_readlane_b32 s87, v40, 31
-; GFX11-NEXT: v_readlane_b32 s86, v40, 30
-; GFX11-NEXT: v_readlane_b32 s85, v40, 29
-; GFX11-NEXT: v_readlane_b32 s84, v40, 28
-; GFX11-NEXT: v_readlane_b32 s83, v40, 27
-; GFX11-NEXT: v_readlane_b32 s82, v40, 26
-; GFX11-NEXT: v_readlane_b32 s81, v40, 25
-; GFX11-NEXT: v_readlane_b32 s80, v40, 24
-; GFX11-NEXT: v_readlane_b32 s71, v40, 23
-; GFX11-NEXT: v_readlane_b32 s70, v40, 22
-; GFX11-NEXT: v_readlane_b32 s69, v40, 21
-; GFX11-NEXT: v_readlane_b32 s68, v40, 20
-; GFX11-NEXT: v_readlane_b32 s67, v40, 19
-; GFX11-NEXT: v_readlane_b32 s66, v40, 18
-; GFX11-NEXT: v_readlane_b32 s65, v40, 17
-; GFX11-NEXT: v_readlane_b32 s64, v40, 16
-; GFX11-NEXT: v_readlane_b32 s55, v40, 15
-; GFX11-NEXT: v_readlane_b32 s54, v40, 14
-; GFX11-NEXT: v_readlane_b32 s53, v40, 13
-; GFX11-NEXT: v_readlane_b32 s52, v40, 12
-; GFX11-NEXT: v_readlane_b32 s51, v40, 11
-; GFX11-NEXT: v_readlane_b32 s50, v40, 10
-; GFX11-NEXT: v_readlane_b32 s49, v40, 9
-; GFX11-NEXT: v_readlane_b32 s48, v40, 8
-; GFX11-NEXT: v_readlane_b32 s39, v40, 7
-; GFX11-NEXT: v_readlane_b32 s38, v40, 6
-; GFX11-NEXT: v_readlane_b32 s37, v40, 5
-; GFX11-NEXT: v_readlane_b32 s36, v40, 4
-; GFX11-NEXT: v_readlane_b32 s35, v40, 3
-; GFX11-NEXT: v_readlane_b32 s34, v40, 2
-; GFX11-NEXT: v_readlane_b32 s31, v40, 1
-; GFX11-NEXT: v_readlane_b32 s30, v40, 0
-; GFX11-NEXT: s_or_saveexec_b32 s0, -1
-; GFX11-NEXT: s_clause 0x3
-; GFX11-NEXT: scratch_load_b32 v40, off, s32
-; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:4
-; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:8
-; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:12
-; GFX11-NEXT: s_mov_b32 exec_lo, s0
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v64bf16_to_v128i8_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_or_saveexec_b32 s4, -1
+; GFX11-TRUE16-NEXT: s_clause 0x3
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:12
+; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s4
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s30, 0
+; GFX11-TRUE16-NEXT: v_writelane_b32 v41, s96, 0
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s72, v1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s73, v2
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s31, 1
+; GFX11-TRUE16-NEXT: v_writelane_b32 v41, s97, 1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s62, v3
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s63, v4
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s60, v5
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s34, 2
+; GFX11-TRUE16-NEXT: v_writelane_b32 v41, s98, 2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s61, v6
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s58, v7
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s59, v8
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s35, 3
+; GFX11-TRUE16-NEXT: v_writelane_b32 v41, s99, 3
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s46, v9
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s47, v10
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s44, v11
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s36, 4
+; GFX11-TRUE16-NEXT: v_writelane_b32 v41, s100, 4
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s45, v12
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s42, v13
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s43, v14
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s37, 5
+; GFX11-TRUE16-NEXT: v_writelane_b32 v41, s101, 5
+; GFX11-TRUE16-NEXT: s_mov_b32 vcc_hi, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr43 : SGPR spill to VGPR lane
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr42 : SGPR spill to VGPR lane
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s38, 6
+; GFX11-TRUE16-NEXT: v_writelane_b32 v41, s102, 6
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s39, 7
+; GFX11-TRUE16-NEXT: v_writelane_b32 v41, s103, 7
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s48, 8
+; GFX11-TRUE16-NEXT: v_writelane_b32 v41, s104, 8
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s49, 9
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s50, 10
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s51, 11
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s52, 12
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s53, 13
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s54, 14
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s55, 15
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s64, 16
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s65, 17
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s66, 18
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s67, 19
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s68, 20
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s69, 21
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s70, 22
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s71, 23
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s80, 24
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s81, 25
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s82, 26
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s83, 27
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s84, 28
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s85, 29
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s86, 30
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s87, 31
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB91_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s27, 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[12:13], s[26:27], 24
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 15
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s99, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s100, s2, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s101, s1, 24
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 14
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s27, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s102, s1, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s103, s0, 16
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s104, s0, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s85, s43, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s43, 16
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 17
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s26, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s43, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s87, s42, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s86, s42, 8
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 18
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s25, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s81, s45, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s98, s45, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s84, s45, 8
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 19
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s48, s44, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s70, s47, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s97, s47, 16
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 13
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s25, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s80, s47, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s83, s46, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s82, s46, 8
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 20
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s66, s59, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s59, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s69, s59, 8
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 21
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s24, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s71, s58, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s39, s58, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s55, s61, 24
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 22
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s23, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s61, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s65, s61, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s68, s60, 16
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 23
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s67, s60, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s51, s63, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s96, s63, 16
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 12
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s23, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s54, s63, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s38, s62, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s64, s62, 8
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s36, s73, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s73, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s50, s73, 8
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 25
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s22, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s53, s72, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s52, s72, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s34, s29, 24
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 26
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s21, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s35, s29, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s37, s28, 16
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 27
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s49, s28, 8
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[14:15], s[16:17], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[40:41], s[2:3], 24
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 11
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s21, 8
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[56:57], s[0:1], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[74:75], s[42:43], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[76:77], s[44:45], 24
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 28
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[78:79], s[46:47], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[88:89], s[58:59], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[90:91], s[60:61], 24
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 29
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s20, 8
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[92:93], s[62:63], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[94:95], s[72:73], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[30:31], s[28:29], 24
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 30
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s19, 24
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 31
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s19, 16
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 10
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s19, 8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_writelane_b32 v42, s4, 0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: v_writelane_b32 v42, s4, 1
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_writelane_b32 v42, s4, 2
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s17, 24
+; GFX11-TRUE16-NEXT: v_writelane_b32 v42, s4, 3
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s17, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 9
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s17, 8
+; GFX11-TRUE16-NEXT: v_writelane_b32 v42, s4, 4
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_writelane_b32 v42, s4, 5
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 8
+; GFX11-TRUE16-NEXT: v_writelane_b32 v42, s4, 6
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s3, 24
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_writelane_b32 v42, s4, 7
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s3, 16
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s3, 8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_writelane_b32 v42, s4, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s44, 16
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s12, 6
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s13, 7
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[12:13], s[24:25], 24
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s12, 4
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s13, 5
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[12:13], s[22:23], 24
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s12, 2
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s13, 3
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[12:13], s[20:21], 24
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s12, 0
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s13, 1
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[12:13], s[18:19], 24
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, vcc_hi
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB91_4
+; GFX11-TRUE16-NEXT: .LBB91_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s29, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s29, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s78, s28, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s1, 0xffff0000
+; GFX11-TRUE16-NEXT: s_and_b32 s15, s45, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s28, s45, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s43, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s43, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s73, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s77, s73, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s76, s72, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s75, s72, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s11, s63, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s74, s63, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s73, s62, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s72, s62, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s62, s61, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s63, s61, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v4, v7, vcc_lo
+; GFX11-TRUE16-NEXT: s_and_b32 s61, s60, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s57, s60, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s40, s59, 0xffff0000
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s45, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s56, s59, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s29, s58, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s14, s58, 16
+; GFX11-TRUE16-NEXT: s_bfe_u32 s4, s45, 0x10010
+; GFX11-TRUE16-NEXT: s_and_b32 s12, s47, 0xffff0000
+; GFX11-TRUE16-NEXT: s_add_i32 s43, s4, s45
+; GFX11-TRUE16-NEXT: s_lshl_b32 s13, s47, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s47, s46, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s41, s46, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s44, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s44, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s42, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s42, 16
+; GFX11-TRUE16-NEXT: s_addk_i32 s43, 0x7fff
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s45, 22
+; GFX11-TRUE16-NEXT: s_and_b32 s42, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s42, s45, s43
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s78
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v2
+; GFX11-TRUE16-NEXT: s_lshr_b32 s58, s42, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v3, v6
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s1, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_bfe_u32 s43, s1, 0x10010
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v24.l
+; GFX11-TRUE16-NEXT: s_add_i32 s43, s43, s1
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s1, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s43, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s42, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: s_cselect_b32 s1, s1, s43
+; GFX11-TRUE16-NEXT: s_and_b32 s42, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshr_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s42
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s77
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v25.l
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s42, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s6
+; GFX11-TRUE16-NEXT: s_bfe_u32 s6, s42, 0x10010
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_add_i32 s6, s6, s42
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s42, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s43, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s6, s42, s6
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s6, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v8, v3
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s0, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_bfe_u32 s42, s0, 0x10010
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_add_i32 s42, s42, s0
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s0, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s42, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s43, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s0, s0, s42
+; GFX11-TRUE16-NEXT: s_and_b32 s42, s3, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s42
+; GFX11-TRUE16-NEXT: s_lshr_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v27.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s42, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s76
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v6
+; GFX11-TRUE16-NEXT: s_bfe_u32 s43, s42, 0x10010
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_add_i32 s43, s43, s42
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s42, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s43, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v7, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s44, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s42, s42, s43
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s75
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v3, v7
+; GFX11-TRUE16-NEXT: s_lshr_b32 s59, s42, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s3, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: s_bfe_u32 s43, s3, 0x10010
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: s_add_i32 s43, s43, s3
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s3, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s43, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s42, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s3, s3, s43
+; GFX11-TRUE16-NEXT: s_and_b32 s42, s2, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s42
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: s_lshr_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s74
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s42, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v26.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v28.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s11
+; GFX11-TRUE16-NEXT: s_bfe_u32 s11, s42, 0x10010
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: s_add_i32 s11, s11, s42
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s42, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s11, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s43, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s11, s42, s11
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v7
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s11, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v1.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v6, v3
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s2, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_bfe_u32 s42, s2, 0x10010
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_add_i32 s42, s42, s2
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s2, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s42, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s43, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s2, s2, s42
+; GFX11-TRUE16-NEXT: s_and_b32 s42, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s42
+; GFX11-TRUE16-NEXT: s_lshr_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s73
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v30.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s42, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-TRUE16-NEXT: s_bfe_u32 s43, s42, 0x10010
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_add_i32 s43, s43, s42
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s42, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s43, 0x7fff
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s72
+; GFX11-TRUE16-NEXT: s_and_b32 s44, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s42, s42, s43
+; GFX11-TRUE16-NEXT: s_lshl_b32 s17, s17, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v3, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-TRUE16-NEXT: s_lshr_b32 s60, s42, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s17, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v29.l
+; GFX11-TRUE16-NEXT: s_bfe_u32 s43, s17, 0x10010
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v31.l
+; GFX11-TRUE16-NEXT: s_add_i32 s43, s43, s17
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s17, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s43, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s42, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s17, s17, s43
+; GFX11-TRUE16-NEXT: s_and_b32 s42, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s42
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s63
+; GFX11-TRUE16-NEXT: s_lshr_b32 s17, s17, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s42, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[13:14], 24, v[11:12]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[14:15], 24, v[4:5]
+; GFX11-TRUE16-NEXT: s_bfe_u32 s43, s42, 0x10010
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: s_add_i32 s43, s43, s42
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s42, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s43, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s44, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s42, s42, s43
+; GFX11-TRUE16-NEXT: s_lshl_b32 s16, s16, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s62
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s42, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s16, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v1.l
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v6, v3
+; GFX11-TRUE16-NEXT: s_bfe_u32 s43, s16, 0x10010
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
+; GFX11-TRUE16-NEXT: s_add_i32 s43, s43, s16
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s16, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s43, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s42, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s16, s16, s43
+; GFX11-TRUE16-NEXT: s_and_b32 s42, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s42
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_lshr_b32 s16, s16, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s61
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s42, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v33.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v7, 16, 1
+; GFX11-TRUE16-NEXT: s_bfe_u32 s43, s42, 0x10010
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v6
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_add_i32 s43, s43, s42
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s42, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s43, 0x7fff
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s57
+; GFX11-TRUE16-NEXT: s_and_b32 s45, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s42, s42, s43
+; GFX11-TRUE16-NEXT: s_lshl_b32 s19, s19, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v3, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-TRUE16-NEXT: s_lshr_b32 s61, s42, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s19, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: s_bfe_u32 s43, s19, 0x10010
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v32.l
+; GFX11-TRUE16-NEXT: s_add_i32 s43, s43, s19
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s19, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s43, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s42, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s19, s19, s43
+; GFX11-TRUE16-NEXT: s_and_b32 s42, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s42
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s56
+; GFX11-TRUE16-NEXT: s_lshr_b32 s19, s19, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s42, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v34.l
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s45, s17, s60
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s44, s16, s44
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s40
+; GFX11-TRUE16-NEXT: s_bfe_u32 s40, s42, 0x10010
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: s_add_i32 s40, s40, s42
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s42, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s40, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s43, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s40, s42, s40
+; GFX11-TRUE16-NEXT: s_lshl_b32 s18, s18, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s18
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s40, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s18, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v1.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v6, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
+; GFX11-TRUE16-NEXT: s_bfe_u32 s42, s18, 0x10010
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-TRUE16-NEXT: s_add_i32 s42, s42, s18
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s18, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s42, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s43, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s18, s18, s42
+; GFX11-TRUE16-NEXT: s_and_b32 s42, s21, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s42
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s29
+; GFX11-TRUE16-NEXT: s_lshr_b32 s18, s18, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s29, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v6
+; GFX11-TRUE16-NEXT: s_bfe_u32 s42, s29, 0x10010
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_add_i32 s42, s42, s29
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s29, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s42, 0x7fff
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s14
+; GFX11-TRUE16-NEXT: s_and_b32 s43, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s29, s29, s42
+; GFX11-TRUE16-NEXT: s_lshl_b32 s21, s21, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s21
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v3, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-TRUE16-NEXT: s_lshr_b32 s62, s29, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s14, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s42, s2, s11
+; GFX11-TRUE16-NEXT: s_bfe_u32 s21, s14, 0x10010
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v35.l
+; GFX11-TRUE16-NEXT: s_add_i32 s21, s21, s14
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s14, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s21, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s29, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s14, s14, s21
+; GFX11-TRUE16-NEXT: s_and_b32 s21, s20, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s21
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s13
+; GFX11-TRUE16-NEXT: s_lshr_b32 s21, s14, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s13, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v36.l
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s43, s3, s59
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s12
+; GFX11-TRUE16-NEXT: s_bfe_u32 s12, s13, 0x10010
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: s_add_i32 s12, s12, s13
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s13, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s12, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s14, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s12, s13, s12
+; GFX11-TRUE16-NEXT: s_lshl_b32 s13, s20, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s13
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s12, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s13, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v1.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v6, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
+; GFX11-TRUE16-NEXT: s_bfe_u32 s14, s13, 0x10010
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-TRUE16-NEXT: s_add_i32 s14, s14, s13
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s13, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s14, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s20, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s13, s13, s14
+; GFX11-TRUE16-NEXT: s_and_b32 s14, s23, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s47
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v39.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s14, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v6
+; GFX11-TRUE16-NEXT: s_bfe_u32 s20, s14, 0x10010
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_add_i32 s29, s20, s14
+; GFX11-TRUE16-NEXT: s_lshr_b32 s20, s13, 16
+; GFX11-TRUE16-NEXT: s_addk_i32 s29, 0x7fff
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s14, 22
+; GFX11-TRUE16-NEXT: s_and_b32 s13, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s13, s14, s29
+; GFX11-TRUE16-NEXT: s_lshl_b32 s14, s23, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s41
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v3, v7
+; GFX11-TRUE16-NEXT: s_lshr_b32 s63, s13, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s14, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s28
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: s_bfe_u32 s23, s14, 0x10010
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s0, s6
+; GFX11-TRUE16-NEXT: s_add_i32 s23, s23, s14
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s14, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s23, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s13, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s13, s14, s23
+; GFX11-TRUE16-NEXT: s_and_b32 s14, s22, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v2
+; GFX11-TRUE16-NEXT: s_lshr_b32 s23, s13, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s14, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s15
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v38.l
+; GFX11-TRUE16-NEXT: s_bfe_u32 s15, s14, 0x10010
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: s_add_i32 s15, s15, s14
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s14, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s15, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s13, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s13, s14, s15
+; GFX11-TRUE16-NEXT: s_lshl_b32 s14, s22, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s14
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v9, 16, 1
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s13, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v48.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v8
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s14, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v7, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: s_bfe_u32 s15, s14, 0x10010
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_add_i32 s15, s15, s14
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s14, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s15, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s22, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: s_cselect_b32 s14, s14, s15
+; GFX11-TRUE16-NEXT: s_and_b32 s15, s25, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: s_lshr_b32 s22, s14, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s15
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s10
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s10, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v6, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v1.l
+; GFX11-TRUE16-NEXT: s_bfe_u32 s9, s10, 0x10010
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v7, 16, 1
+; GFX11-TRUE16-NEXT: s_add_i32 s9, s9, s10
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s10, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s9, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s14, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s9, s10, s9
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-TRUE16-NEXT: s_lshr_b32 s72, s9, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s8, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v8, 16, 1
+; GFX11-TRUE16-NEXT: s_bfe_u32 s10, s8, 0x10010
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v51.l
+; GFX11-TRUE16-NEXT: s_add_i32 s10, s10, s8
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s8, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s10, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s9, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s8, s8, s10
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v2
+; GFX11-TRUE16-NEXT: s_lshr_b32 s25, s8, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v10, v8
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s9, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.l, v50.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.h, v49.l
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s1, s58
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: s_bfe_u32 s7, s9, 0x10010
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_add_i32 s7, s7, s9
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s9, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v8
+; GFX11-TRUE16-NEXT: s_and_b32 s8, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s7, s9, s7
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s24, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s8
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s7, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v68.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v68.l, v53.l
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s8, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: s_bfe_u32 s5, s8, 0x10010
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v6, v9
+; GFX11-TRUE16-NEXT: s_add_i32 s5, s5, s8
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s8, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s7, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s5, s8, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s27, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v2, 16, 1
+; GFX11-TRUE16-NEXT: s_lshr_b32 s24, s5, 16
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v6
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v8, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v10, v2
+; GFX11-TRUE16-NEXT: s_bfe_u32 s7, s4, 0x10010
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-TRUE16-NEXT: s_add_i32 s7, s7, s4
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s4, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s4, s4, s7
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s27, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-TRUE16-NEXT: s_lshr_b32 s73, s4, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v65.l, v54.l
+; GFX11-TRUE16-NEXT: s_bfe_u32 s7, s5, 0x10010
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v65.h, v52.l
+; GFX11-TRUE16-NEXT: s_add_i32 s7, s7, s5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v8, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x7fff
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s5, 22
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s4, s5, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s26, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshr_b32 s27, s4, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v64.l, v55.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[6:7], 24, v[22:23]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[7:8], 24, v[20:21]
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v64.h, v2.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[8:9], 24, v[18:19]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[9:10], 24, v[16:17]
+; GFX11-TRUE16-NEXT: s_bfe_u32 s6, s5, 0x10010
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s63
+; GFX11-TRUE16-NEXT: s_add_i32 s6, s6, s5
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s5, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s14, s5, s6
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s26, 16
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s12
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s14, 16
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s61
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s40
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 8, v65
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s11, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[1:2], 24, v[64:65]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[2:3], 24, v[68:69]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 24, v65
+; GFX11-TRUE16-NEXT: s_bfe_u32 s12, s11, 0x10010
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v64
+; GFX11-TRUE16-NEXT: s_add_i32 s12, s12, s11
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s11, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s12, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s14, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s12, s11, s12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 8, v64
+; GFX11-TRUE16-NEXT: s_lshr_b32 s26, s12, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 24, v69
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 8, v69
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v68
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 8, v68
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 24, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 8, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 8, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 24, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 8, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 8, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 24, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 8, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 8, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 24, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v17, 8, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v16, 8, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v12, 8, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v11, 8, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 8, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 8, v4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s62
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s72
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s47, s27, s73
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s46, s26, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[94:95], s[8:9], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[12:13], s[4:5], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[14:15], s[44:45], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[40:41], s[42:43], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[56:57], s[28:29], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 vcc, s[46:47], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[34:35], s[10:11], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[30:31], s[6:7], 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s47, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s47, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s46, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s46, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s47, s11, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s11, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s57, s10, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s10, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s74, s9, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s9, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s75, s8, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s8, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s76, s7, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s77, s7, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s78, s6, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s79, s6, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s88, s5, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s89, s5, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s90, s4, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s91, s4, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s92, s45, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s45, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s93, s44, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s44, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s95, s43, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s43, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s99, s42, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s100, s42, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s101, s29, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s102, s29, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s103, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s104, s28, 8
+; GFX11-TRUE16-NEXT: s_branch .LBB91_5
+; GFX11-TRUE16-NEXT: .LBB91_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr74
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr104
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr103
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr102
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr101
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr100
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr99
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr7
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr64
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr96
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr68
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr65
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr71
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr69
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr66
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr82
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr83
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr80
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr70
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr84
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr98
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr81
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr87
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr85
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr94
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr92
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr90
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr88
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr78
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 0
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s5, 1
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 2
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s5, 3
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s74, 4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s75, 5
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr74
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr5
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s74, 6
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s75, 7
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr74
+; GFX11-TRUE16-NEXT: s_branch .LBB91_2
+; GFX11-TRUE16-NEXT: .LBB91_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s94 :: v_dual_mov_b32 v14, s30
+; GFX11-TRUE16-NEXT: v_readlane_b32 s94, v43, 2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v96, s37 :: v_dual_mov_b32 v87, s34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s49 :: v_dual_mov_b32 v5, s35
+; GFX11-TRUE16-NEXT: v_readlane_b32 s95, v43, 3
+; GFX11-TRUE16-NEXT: v_readlane_b32 vcc_lo, v43, 6
+; GFX11-TRUE16-NEXT: v_readlane_b32 s30, v43, 0
+; GFX11-TRUE16-NEXT: v_readlane_b32 s34, v43, 4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, s42 :: v_dual_mov_b32 v54, s43
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v52, s10 :: v_dual_mov_b32 v53, s44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v50, s45 :: v_dual_mov_b32 v49, s98
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, s46 :: v_dual_mov_b32 v38, s47
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v48, s97 :: v_dual_mov_b32 v39, s58
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, s59 :: v_dual_mov_b32 v36, s60
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, s9 :: v_dual_mov_b32 v32, s61
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s8 :: v_dual_mov_b32 v33, s62
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, s63 :: v_dual_mov_b32 v30, s72
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, s96 :: v_dual_mov_b32 v26, s73
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s7 :: v_dual_mov_b32 v27, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, s29 :: v_dual_mov_b32 v25, s6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s87 :: v_dual_mov_b32 v64, s86
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s85 :: v_dual_mov_b32 v10, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, s4 :: v_dual_mov_b32 v68, s48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, s81 :: v_dual_mov_b32 v66, s84
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v70, s83 :: v_dual_mov_b32 v69, s70
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, s82 :: v_dual_mov_b32 v23, s80
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v80, s71 :: v_dual_mov_b32 v71, s66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s39 :: v_dual_mov_b32 v21, s69
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v82, s68 :: v_dual_mov_b32 v81, s55
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, s67 :: v_dual_mov_b32 v19, s65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v84, s38 :: v_dual_mov_b32 v83, s51
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s64 :: v_dual_mov_b32 v17, s54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v86, s53 :: v_dual_mov_b32 v11, s52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v85, s36 :: v_dual_mov_b32 v12, s50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s74 :: v_dual_mov_b32 v2, s76
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s78 :: v_dual_mov_b32 v7, s88
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s90 :: v_dual_mov_b32 v9, s92
+; GFX11-TRUE16-NEXT: s_mov_b32 s58, s11
+; GFX11-TRUE16-NEXT: v_readlane_b32 s59, v43, 8
+; GFX11-TRUE16-NEXT: v_readlane_b32 s60, v43, 9
+; GFX11-TRUE16-NEXT: v_readlane_b32 s61, v43, 10
+; GFX11-TRUE16-NEXT: v_readlane_b32 s62, v43, 11
+; GFX11-TRUE16-NEXT: v_readlane_b32 s63, v43, 12
+; GFX11-TRUE16-NEXT: v_readlane_b32 s72, v43, 13
+; GFX11-TRUE16-NEXT: v_readlane_b32 s73, v43, 14
+; GFX11-TRUE16-NEXT: v_readlane_b32 s13, v43, 15
+; GFX11-TRUE16-NEXT: v_readlane_b32 s15, v43, 16
+; GFX11-TRUE16-NEXT: v_readlane_b32 s41, v43, 17
+; GFX11-TRUE16-NEXT: v_readlane_b32 s46, v43, 18
+; GFX11-TRUE16-NEXT: v_readlane_b32 s47, v43, 19
+; GFX11-TRUE16-NEXT: v_readlane_b32 s11, v43, 20
+; GFX11-TRUE16-NEXT: v_readlane_b32 s57, v43, 21
+; GFX11-TRUE16-NEXT: v_readlane_b32 s10, v43, 22
+; GFX11-TRUE16-NEXT: v_readlane_b32 s74, v43, 23
+; GFX11-TRUE16-NEXT: v_readlane_b32 s9, v43, 24
+; GFX11-TRUE16-NEXT: v_readlane_b32 s75, v43, 25
+; GFX11-TRUE16-NEXT: v_readlane_b32 s8, v43, 26
+; GFX11-TRUE16-NEXT: v_readlane_b32 s76, v43, 27
+; GFX11-TRUE16-NEXT: v_readlane_b32 s77, v43, 28
+; GFX11-TRUE16-NEXT: v_readlane_b32 s78, v43, 29
+; GFX11-TRUE16-NEXT: v_readlane_b32 s79, v43, 30
+; GFX11-TRUE16-NEXT: v_readlane_b32 s88, v43, 31
+; GFX11-TRUE16-NEXT: v_readlane_b32 s89, v42, 0
+; GFX11-TRUE16-NEXT: v_readlane_b32 s90, v42, 1
+; GFX11-TRUE16-NEXT: v_readlane_b32 s91, v42, 2
+; GFX11-TRUE16-NEXT: v_readlane_b32 s92, v42, 3
+; GFX11-TRUE16-NEXT: v_readlane_b32 s45, v42, 4
+; GFX11-TRUE16-NEXT: v_readlane_b32 s93, v42, 5
+; GFX11-TRUE16-NEXT: v_readlane_b32 vcc_hi, v43, 7
+; GFX11-TRUE16-NEXT: v_readlane_b32 s44, v42, 6
+; GFX11-TRUE16-NEXT: v_readlane_b32 s31, v43, 1
+; GFX11-TRUE16-NEXT: v_readlane_b32 s95, v42, 7
+; GFX11-TRUE16-NEXT: v_readlane_b32 s43, v42, 8
+; GFX11-TRUE16-NEXT: v_readlane_b32 s35, v43, 5
+; GFX11-TRUE16-NEXT: .LBB91_5: ; %end
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s104, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s103, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s56, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s0, s0, s4
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s5, s6
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s1, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s102, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s58, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s101, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s1, s5
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s4, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s1, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-TRUE16-NEXT: s_or_b32 s0, s0, s4
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s1, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s100, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s99, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s40, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s2, s2, s4
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s5, s6
+; GFX11-TRUE16-NEXT: s_and_b32 s3, s3, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s43, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s59, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s95, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s3, s3, s5
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s4, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s3, s3, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-TRUE16-NEXT: s_or_b32 s2, s2, s4
+; GFX11-TRUE16-NEXT: s_or_b32 s3, s3, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v97, s0 :: v_dual_mov_b32 v98, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v99, s2 :: v_dual_mov_b32 v100, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s44, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s93, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s14, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s0, s0, s1
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s2, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s17, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s45, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s60, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s92, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-TRUE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: s_or_b32 s0, s0, s1
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s2, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s91, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s90, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s12, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-TRUE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s19, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s89, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s61, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s88, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-TRUE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-TRUE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v112, s0 :: v_dual_mov_b32 v113, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v114, s2 :: v_dual_mov_b32 v115, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s79, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s78, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s30, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s0, s0, s1
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s2, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s21, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s77, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s62, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s76, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-TRUE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: s_or_b32 s0, s0, s1
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s2, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s8, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s75, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s94, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-TRUE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s23, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s9, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s63, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s74, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-TRUE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-TRUE16-NEXT: s_clause 0x1
+; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[97:100], off
+; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[112:115], off offset:16
+; GFX11-TRUE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v97, s0 :: v_dual_mov_b32 v98, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v99, s2 :: v_dual_mov_b32 v100, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s10, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s57, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s34, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s0, s0, s1
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s2, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s11, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s0, s0, s1
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s25, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s3, s72, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s47, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s1, s2
+; GFX11-TRUE16-NEXT: s_or_b32 s2, s3, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s1, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s3, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s46, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s1, s2
+; GFX11-TRUE16-NEXT: s_or_b32 s2, s3, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s3, s41, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, vcc_lo, 8
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s15, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s3, s3, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s27, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s13, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s73, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v112, s0 :: v_dual_and_b32 v27, 0xff, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v113, s1 :: v_dual_lshlrev_b32 v4, 8, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v14, 8, v14
+; GFX11-TRUE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-TRUE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v115, s3 :: v_dual_and_b32 v96, 0xff, v96
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v27, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v114, s2 :: v_dual_lshlrev_b32 v5, 8, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v96, v14
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v11, 8, v11
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v26, 0xff, v26
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v14, 16, v14
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v12, 8, v12
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v16, 8, v16
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v17, 8, v17
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v4, v14
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v24
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v25
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v24, 8, v87
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v25, 0xff, v30
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v30, 0xff, v86
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v26, v12
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v14, v24
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v25, v11
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v30, v13
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v28
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v24, 8, v85
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v25, 0xff, v33
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v26, 0xff, v84
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v28, 0xff, v29
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v29, 0xff, v31
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v30, 8, v83
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v14, v24
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v25, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v26, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v28, v17
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, v29, v30
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v14, 16, v14
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v24, 16, v24
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v4, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v11, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v12, v14
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v16, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v17, v24
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xff, v36
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v11, 8, v18
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xff, v82
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v8, 8, v8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xff, v32
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v14, 8, v19
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xff, v34
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v17, 8, v81
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xff, v39
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v19, 8, v20
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v9, v11
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v12, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v13, v14
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v16, v17
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v18, v19
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v80
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v7, 8, v7
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xff, v35
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v17, 8, v21
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xff, v37
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v19, 8, v71
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xff, v51
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 8, v22
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v22, 0xff, v70
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 8, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v14, v7
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v16, v17
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v18, v19
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v20, v21
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v22, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v8, 16, v8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v12, 16, v12
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v18, 16, v7
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v16, 16, v16
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v19, 16, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v9, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v11, v12
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v13, v18
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v14, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v17, v19
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v38
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 8, v23
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v48
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v16, 8, v69
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xff, v53
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v18, 8, v68
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xff, v67
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 8, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xff, v50
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 8, v66
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v14, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v17, v18
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v19, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v20, v21
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v18, 8, v65
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xff, v55
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v14
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v49
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v20, 8, v64
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v21, 0xff, v54
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v10, 8, v10
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v22, 0xff, v52
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 8, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v14, v18
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v19, v20
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v15, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v21, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v22, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v19, 16, v14
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v8, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v17, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v16, v19
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v18, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v10, v3
+; GFX11-TRUE16-NEXT: s_clause 0x5
+; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[97:100], off offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[112:115], off offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[27:30], off offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[4:7], off offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[11:14], off offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[15:18], off offset:112
+; GFX11-TRUE16-NEXT: v_readlane_b32 s104, v41, 8
+; GFX11-TRUE16-NEXT: v_readlane_b32 s103, v41, 7
+; GFX11-TRUE16-NEXT: v_readlane_b32 s102, v41, 6
+; GFX11-TRUE16-NEXT: v_readlane_b32 s101, v41, 5
+; GFX11-TRUE16-NEXT: v_readlane_b32 s100, v41, 4
+; GFX11-TRUE16-NEXT: v_readlane_b32 s99, v41, 3
+; GFX11-TRUE16-NEXT: v_readlane_b32 s98, v41, 2
+; GFX11-TRUE16-NEXT: v_readlane_b32 s97, v41, 1
+; GFX11-TRUE16-NEXT: v_readlane_b32 s96, v41, 0
+; GFX11-TRUE16-NEXT: v_readlane_b32 s87, v40, 31
+; GFX11-TRUE16-NEXT: v_readlane_b32 s86, v40, 30
+; GFX11-TRUE16-NEXT: v_readlane_b32 s85, v40, 29
+; GFX11-TRUE16-NEXT: v_readlane_b32 s84, v40, 28
+; GFX11-TRUE16-NEXT: v_readlane_b32 s83, v40, 27
+; GFX11-TRUE16-NEXT: v_readlane_b32 s82, v40, 26
+; GFX11-TRUE16-NEXT: v_readlane_b32 s81, v40, 25
+; GFX11-TRUE16-NEXT: v_readlane_b32 s80, v40, 24
+; GFX11-TRUE16-NEXT: v_readlane_b32 s71, v40, 23
+; GFX11-TRUE16-NEXT: v_readlane_b32 s70, v40, 22
+; GFX11-TRUE16-NEXT: v_readlane_b32 s69, v40, 21
+; GFX11-TRUE16-NEXT: v_readlane_b32 s68, v40, 20
+; GFX11-TRUE16-NEXT: v_readlane_b32 s67, v40, 19
+; GFX11-TRUE16-NEXT: v_readlane_b32 s66, v40, 18
+; GFX11-TRUE16-NEXT: v_readlane_b32 s65, v40, 17
+; GFX11-TRUE16-NEXT: v_readlane_b32 s64, v40, 16
+; GFX11-TRUE16-NEXT: v_readlane_b32 s55, v40, 15
+; GFX11-TRUE16-NEXT: v_readlane_b32 s54, v40, 14
+; GFX11-TRUE16-NEXT: v_readlane_b32 s53, v40, 13
+; GFX11-TRUE16-NEXT: v_readlane_b32 s52, v40, 12
+; GFX11-TRUE16-NEXT: v_readlane_b32 s51, v40, 11
+; GFX11-TRUE16-NEXT: v_readlane_b32 s50, v40, 10
+; GFX11-TRUE16-NEXT: v_readlane_b32 s49, v40, 9
+; GFX11-TRUE16-NEXT: v_readlane_b32 s48, v40, 8
+; GFX11-TRUE16-NEXT: v_readlane_b32 s39, v40, 7
+; GFX11-TRUE16-NEXT: v_readlane_b32 s38, v40, 6
+; GFX11-TRUE16-NEXT: v_readlane_b32 s37, v40, 5
+; GFX11-TRUE16-NEXT: v_readlane_b32 s36, v40, 4
+; GFX11-TRUE16-NEXT: v_readlane_b32 s35, v40, 3
+; GFX11-TRUE16-NEXT: v_readlane_b32 s34, v40, 2
+; GFX11-TRUE16-NEXT: v_readlane_b32 s31, v40, 1
+; GFX11-TRUE16-NEXT: v_readlane_b32 s30, v40, 0
+; GFX11-TRUE16-NEXT: s_or_saveexec_b32 s0, -1
+; GFX11-TRUE16-NEXT: s_clause 0x3
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:12
+; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s0
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v64bf16_to_v128i8_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_or_saveexec_b32 s4, -1
+; GFX11-FAKE16-NEXT: s_clause 0x3
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:4
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:8
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v43, s32 offset:12
+; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s4
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s30, 0
+; GFX11-FAKE16-NEXT: v_writelane_b32 v41, s96, 0
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s72, v1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s73, v2
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s31, 1
+; GFX11-FAKE16-NEXT: v_writelane_b32 v41, s97, 1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s62, v3
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s63, v4
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s60, v5
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s34, 2
+; GFX11-FAKE16-NEXT: v_writelane_b32 v41, s98, 2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s61, v6
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s58, v7
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s59, v8
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s35, 3
+; GFX11-FAKE16-NEXT: v_writelane_b32 v41, s99, 3
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s56, v9
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s57, v10
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s46, v11
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s36, 4
+; GFX11-FAKE16-NEXT: v_writelane_b32 v41, s100, 4
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s47, v12
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s44, v13
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s45, v14
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s37, 5
+; GFX11-FAKE16-NEXT: v_writelane_b32 v41, s101, 5
+; GFX11-FAKE16-NEXT: s_mov_b32 vcc_hi, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr43 : SGPR spill to VGPR lane
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr42 : SGPR spill to VGPR lane
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s38, 6
+; GFX11-FAKE16-NEXT: v_writelane_b32 v41, s102, 6
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s39, 7
+; GFX11-FAKE16-NEXT: v_writelane_b32 v41, s103, 7
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s48, 8
+; GFX11-FAKE16-NEXT: v_writelane_b32 v41, s104, 8
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s49, 9
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s50, 10
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s51, 11
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s52, 12
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s53, 13
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s54, 14
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s55, 15
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s64, 16
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s65, 17
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s66, 18
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s67, 19
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s68, 20
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s69, 21
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s70, 22
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s71, 23
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s80, 24
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s81, 25
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s82, 26
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s83, 27
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s84, 28
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s85, 29
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s86, 30
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s87, 31
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB91_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s27, 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[12:13], s[26:27], 24
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 15
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s99, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s100, s2, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s101, s1, 24
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 14
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s27, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s102, s1, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s103, s0, 16
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s104, s0, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s85, s45, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s45, 16
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 17
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s26, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s45, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s87, s44, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s86, s44, 8
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 18
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s25, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s81, s47, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s98, s47, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s84, s47, 8
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 19
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s48, s46, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s70, s57, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s97, s57, 16
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 13
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s25, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s80, s57, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s83, s56, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s82, s56, 8
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 20
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s66, s59, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s59, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s69, s59, 8
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 21
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s24, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s71, s58, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s39, s58, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s55, s61, 24
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 22
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s23, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s61, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s65, s61, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s68, s60, 16
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 23
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s67, s60, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s51, s63, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s96, s63, 16
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 12
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s23, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s54, s63, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s38, s62, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s64, s62, 8
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s36, s73, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s73, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s50, s73, 8
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 25
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s22, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s53, s72, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s52, s72, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s34, s29, 24
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 26
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s21, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s35, s29, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s37, s28, 16
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 27
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s49, s28, 8
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[14:15], s[16:17], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[40:41], s[2:3], 24
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 11
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s21, 8
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[42:43], s[0:1], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[74:75], s[44:45], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[76:77], s[46:47], 24
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 28
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[78:79], s[56:57], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[88:89], s[58:59], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[90:91], s[60:61], 24
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 29
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s20, 8
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[92:93], s[62:63], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[94:95], s[72:73], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[30:31], s[28:29], 24
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 30
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s19, 24
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 31
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s19, 16
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 10
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s19, 8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_writelane_b32 v42, s4, 0
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-FAKE16-NEXT: v_writelane_b32 v42, s4, 1
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s18, 8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_writelane_b32 v42, s4, 2
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s17, 24
+; GFX11-FAKE16-NEXT: v_writelane_b32 v42, s4, 3
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s17, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 9
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s17, 8
+; GFX11-FAKE16-NEXT: v_writelane_b32 v42, s4, 4
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_writelane_b32 v42, s4, 5
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 8
+; GFX11-FAKE16-NEXT: v_writelane_b32 v42, s4, 6
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s3, 24
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_writelane_b32 v42, s4, 7
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s3, 16
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s3, 8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_writelane_b32 v42, s4, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s46, 16
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s12, 6
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s13, 7
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[12:13], s[24:25], 24
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s12, 4
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s13, 5
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[12:13], s[22:23], 24
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s12, 2
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s13, 3
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[12:13], s[20:21], 24
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s12, 0
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s13, 1
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[12:13], s[18:19], 24
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, vcc_hi
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB91_4
+; GFX11-FAKE16-NEXT: .LBB91_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s29, 0xffff0000
+; GFX11-FAKE16-NEXT: s_and_b32 s14, s47, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s1, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s15, s47, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s29, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s6
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s45, 0xffff0000
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s47, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s45, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s78, s28, 0xffff0000
+; GFX11-FAKE16-NEXT: s_bfe_u32 s6, s47, 0x10010
+; GFX11-FAKE16-NEXT: s_lshl_b32 s79, s28, 16
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s6, s47
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s73, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s77, s73, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s75, s72, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s76, s72, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s11, s63, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s74, s63, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s72, s62, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s73, s62, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s63, s61, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s62, s61, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s61, s60, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s60, s60, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s41, s59, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s40, s59, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s28, s58, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s29, s58, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s13, s57, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s57, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s42, s56, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s43, s56, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s12, s46, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s46, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s44, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s44, 16
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s47, 22
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: s_and_b32 s44, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: s_cselect_b32 s44, s47, s45
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: s_lshr_b32 s58, s44, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s78
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s1, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s79
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_bfe_u32 s45, s1, 0x10010
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v6, 16, 1
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s45, s1
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s1, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s44, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s1, s1, s45
+; GFX11-FAKE16-NEXT: s_and_b32 s44, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s44
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v4, v6
+; GFX11-FAKE16-NEXT: s_lshr_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s44, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: s_bfe_u32 s45, s44, 0x10010
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s45, s44
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s44, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v5, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v6
+; GFX11-FAKE16-NEXT: s_cselect_b32 s44, s44, s45
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s0, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s77
+; GFX11-FAKE16-NEXT: s_bfe_u32 s5, s0, 0x10010
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v22, 16, v4
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s5, s0
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s44, 16
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s0, 22
+; GFX11-FAKE16-NEXT: s_and_b32 s44, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s0, s0, s45
+; GFX11-FAKE16-NEXT: s_and_b32 s44, s3, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s44
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v5, 16, 1
+; GFX11-FAKE16-NEXT: s_lshr_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s44, v9
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v6, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v2, 16, v3
+; GFX11-FAKE16-NEXT: s_bfe_u32 s45, s44, 0x10010
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s45, s44
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s44, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: s_cselect_b32 s44, s44, s45
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s76
+; GFX11-FAKE16-NEXT: s_lshr_b32 s59, s44, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s75
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s3, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v87, 24, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v96, 16, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-FAKE16-NEXT: s_bfe_u32 s45, s3, 0x10010
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s45, s3
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s3, 22
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s44, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s3, s3, s45
+; GFX11-FAKE16-NEXT: s_and_b32 s44, s2, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s44
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v8, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s44, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_lshr_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v4
+; GFX11-FAKE16-NEXT: s_bfe_u32 s45, s44, 0x10010
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v24
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s45, s44
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s44, 22
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s74
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v25, 16, v5
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s44, s44, s45
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v85, 24, v14
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s2, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s11
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: s_bfe_u32 s11, s2, 0x10010
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s11, s2
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s44, 16
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s2, 22
+; GFX11-FAKE16-NEXT: s_and_b32 s44, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s2, s2, s45
+; GFX11-FAKE16-NEXT: s_and_b32 s44, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v26
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s44
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v3, 16, 1
+; GFX11-FAKE16-NEXT: s_lshr_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v2, 16, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s44, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v86, 16, v13
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: s_bfe_u32 s45, s44, 0x10010
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s45, s44
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s44, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s44, s44, s45
+; GFX11-FAKE16-NEXT: s_lshl_b32 s17, s17, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s73
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s17
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s72
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s17, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v8, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_lshr_b32 s72, s44, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: s_bfe_u32 s45, s17, 0x10010
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v27
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s45, s17
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s17, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s44, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v28, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v1, 16, 1
+; GFX11-FAKE16-NEXT: s_cselect_b32 s17, s17, s45
+; GFX11-FAKE16-NEXT: s_and_b32 s44, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshr_b32 s17, s17, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s63
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 24, v16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v5, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v29
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v8, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s44
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s44, v8
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: s_bfe_u32 s45, s44, 0x10010
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s45, s44
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s44, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s44, s44, s45
+; GFX11-FAKE16-NEXT: s_lshl_b32 s16, s16, 16
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s44, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s16, v8
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s62
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_bfe_u32 s45, s16, 0x10010
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s45, s16
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s16, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s44, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s16, s16, s45
+; GFX11-FAKE16-NEXT: s_and_b32 s44, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s44
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v1, 16, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v4, 16, 1
+; GFX11-FAKE16-NEXT: s_lshr_b32 s16, s16, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s44, v10
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v9, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s60
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v4
+; GFX11-FAKE16-NEXT: s_bfe_u32 s45, s44, 0x10010
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s61
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s45, s44
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s44, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s44, s44, s45
+; GFX11-FAKE16-NEXT: s_lshl_b32 s19, s19, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s19
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_lshr_b32 s60, s44, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s19, v10
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v9, v8
+; GFX11-FAKE16-NEXT: s_bfe_u32 s45, s19, 0x10010
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v2
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s45, s19
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s19, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s44, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s19, s19, s45
+; GFX11-FAKE16-NEXT: s_and_b32 s44, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v3, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s44
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_lshr_b32 s19, s19, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s29
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s41
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s41, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s47, s17, s72
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v3, 16, 1
+; GFX11-FAKE16-NEXT: s_bfe_u32 s44, s41, 0x10010
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_add_i32 s44, s44, s41
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s41, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s44, 0x7fff
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s40
+; GFX11-FAKE16-NEXT: s_and_b32 s45, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s41, s41, s44
+; GFX11-FAKE16-NEXT: s_lshl_b32 s18, s18, 16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v30, 16, v4
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s18, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v1, 16, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-FAKE16-NEXT: s_bfe_u32 s40, s18, 0x10010
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s28
+; GFX11-FAKE16-NEXT: s_add_i32 s44, s40, s18
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s41, 16
+; GFX11-FAKE16-NEXT: s_addk_i32 s44, 0x7fff
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s18, 22
+; GFX11-FAKE16-NEXT: s_and_b32 s41, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s18, s18, s44
+; GFX11-FAKE16-NEXT: s_and_b32 s41, s21, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s41
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v9, 16, 1
+; GFX11-FAKE16-NEXT: s_lshr_b32 s18, s18, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s28, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v9
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v10, 16, 1
+; GFX11-FAKE16-NEXT: s_bfe_u32 s29, s28, 0x10010
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v1
+; GFX11-FAKE16-NEXT: s_add_i32 s29, s29, s28
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s28, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s29, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s41, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s28, s28, s29
+; GFX11-FAKE16-NEXT: s_lshl_b32 s21, s21, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s21
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_lshr_b32 s61, s28, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v5, v10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s44, s2, s11
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s21, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v4, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: s_bfe_u32 s29, s21, 0x10010
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v2
+; GFX11-FAKE16-NEXT: s_add_i32 s29, s29, s21
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s21, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s29, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s28, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s21, s21, s29
+; GFX11-FAKE16-NEXT: s_and_b32 s28, s20, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s28
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v10
+; GFX11-FAKE16-NEXT: s_lshr_b32 s21, s21, 16
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s45, s3, s59
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s46, s16, s46
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s13
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s13, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 24, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_bfe_u32 s28, s13, 0x10010
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v34
+; GFX11-FAKE16-NEXT: s_add_i32 s28, s28, s13
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s13, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s28, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s29, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s13, s13, s28
+; GFX11-FAKE16-NEXT: s_lshl_b32 s20, s20, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s20
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v33, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v35
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s20, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v2, 16, v9
+; GFX11-FAKE16-NEXT: s_bfe_u32 s10, s20, 0x10010
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: s_add_i32 s28, s10, s20
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s13, 16
+; GFX11-FAKE16-NEXT: s_addk_i32 s28, 0x7fff
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s20, 22
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v3
+; GFX11-FAKE16-NEXT: s_and_b32 s13, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_cselect_b32 s13, s20, s28
+; GFX11-FAKE16-NEXT: s_and_b32 s20, s23, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s42
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s20
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s43
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s28, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v19
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: s_bfe_u32 s20, s28, 0x10010
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_add_i32 s29, s20, s28
+; GFX11-FAKE16-NEXT: s_lshr_b32 s20, s13, 16
+; GFX11-FAKE16-NEXT: s_addk_i32 s29, 0x7fff
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s28, 22
+; GFX11-FAKE16-NEXT: s_and_b32 s13, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s13, s28, s29
+; GFX11-FAKE16-NEXT: s_lshl_b32 s23, s23, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v4, v8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s62, s13, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v5, v9
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s23, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: s_bfe_u32 s28, s23, 0x10010
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v9
+; GFX11-FAKE16-NEXT: s_add_i32 s28, s28, s23
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s23, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s28, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s13, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: s_cselect_b32 s13, s23, s28
+; GFX11-FAKE16-NEXT: s_and_b32 s23, s22, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v36
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s23
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s14
+; GFX11-FAKE16-NEXT: s_lshr_b32 s23, s13, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s14, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v71, v37, 16, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s12
+; GFX11-FAKE16-NEXT: s_bfe_u32 s15, s14, 0x10010
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: s_add_i32 s15, s15, s14
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s14, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s15, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s13, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s13, s14, s15
+; GFX11-FAKE16-NEXT: s_lshl_b32 s14, s22, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s14
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v38
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v9, v8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s13, 16
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s14, v10
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v70, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v9
+; GFX11-FAKE16-NEXT: s_bfe_u32 s12, s14, 0x10010
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v8
+; GFX11-FAKE16-NEXT: s_add_i32 s12, s12, s14
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s14, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s12, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s15, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s12, s14, s12
+; GFX11-FAKE16-NEXT: s_and_b32 s14, s25, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s9
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s9, v10
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_lshr_b32 s22, s12, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-FAKE16-NEXT: s_bfe_u32 s14, s9, 0x10010
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_add_i32 s14, s14, s9
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s9, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s14, 0x7fff
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v1
+; GFX11-FAKE16-NEXT: s_and_b32 s12, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s9, s9, s14
+; GFX11-FAKE16-NEXT: s_lshl_b32 s12, s25, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v3, v4
+; GFX11-FAKE16-NEXT: s_lshr_b32 s63, s9, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s8, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v8
+; GFX11-FAKE16-NEXT: s_bfe_u32 s12, s8, 0x10010
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v9, 16, 1
+; GFX11-FAKE16-NEXT: s_add_i32 s12, s12, s8
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s8, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s12, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s9, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_cselect_b32 s8, s8, s12
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s24, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: s_lshr_b32 s25, s8, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v2, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s9
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v12, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s6
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s7, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s28, s0, s5
+; GFX11-FAKE16-NEXT: s_bfe_u32 s9, s7, 0x10010
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v3
+; GFX11-FAKE16-NEXT: s_add_i32 s9, s9, s7
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s7, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s9, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s8, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s7, s7, s9
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s24, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s7, 16
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s8, v10
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v12, 16, 1
+; GFX11-FAKE16-NEXT: s_bfe_u32 s4, s8, 0x10010
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v2
+; GFX11-FAKE16-NEXT: s_add_i32 s4, s4, s8
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s8, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s4, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s6, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s4, s8, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s27, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v52, 0x40c00000, s6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v12
+; GFX11-FAKE16-NEXT: s_lshr_b32 s24, s4, 16
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v52
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v52, v52
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v9, 16, 1
+; GFX11-FAKE16-NEXT: s_bfe_u32 s7, s6, 0x10010
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_add_i32 s7, s7, s6
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s6, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s7, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s4, s6, s7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s27, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v4, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: s_lshr_b32 s73, s4, 16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v49
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v51
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v66, v1, 16, v11
+; GFX11-FAKE16-NEXT: s_bfe_u32 s7, s6, 0x10010
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: s_add_i32 s7, s7, s6
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s6, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s7, 0x7fff
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s4, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s26, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshr_b32 s27, s4, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v52
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v39
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v55, v50, 16, v4
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s22, s13
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v54, v2, 16, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v67, v48, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[8:9], 24, v[17:18]
+; GFX11-FAKE16-NEXT: s_bfe_u32 s5, s6, 0x10010
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[9:10], 24, v[15:16]
+; GFX11-FAKE16-NEXT: s_add_i32 s5, s5, s6
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s6, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s5, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s14, s6, s5
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s26, 16
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s20, s10
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s14, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[10:11], 24, v[13:14]
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[11:12], 24, v[6:7]
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s29, s1, s58
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s11, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[1:2], 24, v[54:55]
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[2:3], 24, v[66:67]
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[3:4], 24, v[70:71]
+; GFX11-FAKE16-NEXT: s_bfe_u32 s10, s11, 0x10010
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[4:5], 24, v[19:20]
+; GFX11-FAKE16-NEXT: s_add_i32 s10, s10, s11
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s11, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s10, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s14, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s10, s11, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s19, s60
+; GFX11-FAKE16-NEXT: s_lshr_b32 s26, s10, 16
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s18, s40
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s23, s62
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 24, v55
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 8, v55
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v54
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 8, v54
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 24, v67
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 8, v67
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v66
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 8, v66
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 24, v71
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 8, v71
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v70
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 8, v70
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 24, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 8, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 8, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 8, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v17, 8, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 8, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v84, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 8, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 8, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 8, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 8, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 8, v6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s21, s61
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s25, s63
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s57, s27, s73
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s56, s26, s13
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s24, s12
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[94:95], s[8:9], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[12:13], s[4:5], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[14:15], s[46:47], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[40:41], s[44:45], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[42:43], s[28:29], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 vcc, s[56:57], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[34:35], s[10:11], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[30:31], s[6:7], 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s57, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s57, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s56, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s56, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s56, s11, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s11, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s57, s10, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s10, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s74, s9, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s9, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s75, s8, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s8, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s76, s7, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s77, s7, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s78, s6, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s79, s6, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s88, s5, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s89, s5, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s90, s4, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s91, s4, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s92, s47, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s47, s47, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s93, s46, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s46, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s95, s45, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s45, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s99, s44, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s100, s44, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s101, s29, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s102, s29, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s103, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s104, s28, 8
+; GFX11-FAKE16-NEXT: s_branch .LBB91_5
+; GFX11-FAKE16-NEXT: .LBB91_3:
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr74
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr104
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr103
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr102
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr101
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr100
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr99
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr7
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr96
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr82
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr83
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr80
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr97
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr84
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr98
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr81
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr86
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr87
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr85
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr94
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr92
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr90
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr88
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr78
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr76
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 0
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s5, 1
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 2
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s5, 3
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s74, 4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s75, 5
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr74
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr5
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s74, 6
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s75, 7
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr74
+; GFX11-FAKE16-NEXT: s_branch .LBB91_2
+; GFX11-FAKE16-NEXT: .LBB91_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s94 :: v_dual_mov_b32 v11, s30
+; GFX11-FAKE16-NEXT: v_readlane_b32 s94, v43, 2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v96, s37 :: v_dual_mov_b32 v87, s34
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s49 :: v_dual_mov_b32 v7, s35
+; GFX11-FAKE16-NEXT: v_readlane_b32 s95, v43, 3
+; GFX11-FAKE16-NEXT: v_readlane_b32 vcc_lo, v43, 6
+; GFX11-FAKE16-NEXT: v_readlane_b32 s30, v43, 0
+; GFX11-FAKE16-NEXT: v_readlane_b32 s34, v43, 4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v52, s44 :: v_dual_mov_b32 v51, s45
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v50, s10 :: v_dual_mov_b32 v49, s46
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v39, s47 :: v_dual_mov_b32 v48, s98
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v38, s56 :: v_dual_mov_b32 v37, s97
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v36, s57 :: v_dual_mov_b32 v35, s58
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s59 :: v_dual_mov_b32 v33, s9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s60 :: v_dual_mov_b32 v31, s61
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s8 :: v_dual_mov_b32 v29, s62
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, s63 :: v_dual_mov_b32 v28, s96
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s72 :: v_dual_mov_b32 v25, s7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s73 :: v_dual_mov_b32 v23, s28
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v21, s29 :: v_dual_mov_b32 v22, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v53, s87 :: v_dual_mov_b32 v54, s86
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s85 :: v_dual_mov_b32 v12, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v65, s4 :: v_dual_mov_b32 v66, s48
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v55, s81 :: v_dual_mov_b32 v64, s84
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v69, s83 :: v_dual_mov_b32 v70, s82
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v67, s70 :: v_dual_mov_b32 v68, s80
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v80, s71 :: v_dual_mov_b32 v19, s39
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v71, s66 :: v_dual_mov_b32 v20, s69
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v82, s68 :: v_dual_mov_b32 v17, s67
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v81, s55 :: v_dual_mov_b32 v18, s65
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v84, s38 :: v_dual_mov_b32 v15, s64
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v83, s51 :: v_dual_mov_b32 v16, s54
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v86, s53 :: v_dual_mov_b32 v13, s52
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v85, s36 :: v_dual_mov_b32 v14, s50
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s74 :: v_dual_mov_b32 v2, s76
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s78 :: v_dual_mov_b32 v4, s88
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s90 :: v_dual_mov_b32 v9, s92
+; GFX11-FAKE16-NEXT: s_mov_b32 s58, s11
+; GFX11-FAKE16-NEXT: v_readlane_b32 s59, v43, 8
+; GFX11-FAKE16-NEXT: v_readlane_b32 s72, v43, 9
+; GFX11-FAKE16-NEXT: v_readlane_b32 s60, v43, 10
+; GFX11-FAKE16-NEXT: v_readlane_b32 s61, v43, 11
+; GFX11-FAKE16-NEXT: v_readlane_b32 s62, v43, 12
+; GFX11-FAKE16-NEXT: v_readlane_b32 s63, v43, 13
+; GFX11-FAKE16-NEXT: v_readlane_b32 s73, v43, 14
+; GFX11-FAKE16-NEXT: v_readlane_b32 s13, v43, 15
+; GFX11-FAKE16-NEXT: v_readlane_b32 s15, v43, 16
+; GFX11-FAKE16-NEXT: v_readlane_b32 s41, v43, 17
+; GFX11-FAKE16-NEXT: v_readlane_b32 s43, v43, 18
+; GFX11-FAKE16-NEXT: v_readlane_b32 s56, v43, 19
+; GFX11-FAKE16-NEXT: v_readlane_b32 s11, v43, 20
+; GFX11-FAKE16-NEXT: v_readlane_b32 s57, v43, 21
+; GFX11-FAKE16-NEXT: v_readlane_b32 s10, v43, 22
+; GFX11-FAKE16-NEXT: v_readlane_b32 s74, v43, 23
+; GFX11-FAKE16-NEXT: v_readlane_b32 s9, v43, 24
+; GFX11-FAKE16-NEXT: v_readlane_b32 s75, v43, 25
+; GFX11-FAKE16-NEXT: v_readlane_b32 s8, v43, 26
+; GFX11-FAKE16-NEXT: v_readlane_b32 s76, v43, 27
+; GFX11-FAKE16-NEXT: v_readlane_b32 s77, v43, 28
+; GFX11-FAKE16-NEXT: v_readlane_b32 s78, v43, 29
+; GFX11-FAKE16-NEXT: v_readlane_b32 s79, v43, 30
+; GFX11-FAKE16-NEXT: v_readlane_b32 s88, v43, 31
+; GFX11-FAKE16-NEXT: v_readlane_b32 s89, v42, 0
+; GFX11-FAKE16-NEXT: v_readlane_b32 s90, v42, 1
+; GFX11-FAKE16-NEXT: v_readlane_b32 s91, v42, 2
+; GFX11-FAKE16-NEXT: v_readlane_b32 s92, v42, 3
+; GFX11-FAKE16-NEXT: v_readlane_b32 s47, v42, 4
+; GFX11-FAKE16-NEXT: v_readlane_b32 s93, v42, 5
+; GFX11-FAKE16-NEXT: v_readlane_b32 vcc_hi, v43, 7
+; GFX11-FAKE16-NEXT: v_readlane_b32 s46, v42, 6
+; GFX11-FAKE16-NEXT: v_readlane_b32 s31, v43, 1
+; GFX11-FAKE16-NEXT: v_readlane_b32 s95, v42, 7
+; GFX11-FAKE16-NEXT: v_readlane_b32 s45, v42, 8
+; GFX11-FAKE16-NEXT: v_readlane_b32 s35, v43, 5
+; GFX11-FAKE16-NEXT: .LBB91_5: ; %end
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s104, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s103, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s42, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s0, s0, s4
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s5, s6
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s1, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s102, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s58, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s101, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s1, s5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s4, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s1, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-FAKE16-NEXT: s_or_b32 s0, s0, s4
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s1, s5
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s100, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s99, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s40, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s2, s2, s4
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s5, s6
+; GFX11-FAKE16-NEXT: s_and_b32 s3, s3, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s45, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s59, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s95, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s3, s3, s5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s4, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s3, s3, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-FAKE16-NEXT: s_or_b32 s2, s2, s4
+; GFX11-FAKE16-NEXT: s_or_b32 s3, s3, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v97, s0 :: v_dual_mov_b32 v98, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v99, s2 :: v_dual_mov_b32 v100, s3
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s46, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s93, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s14, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s0, s0, s1
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s2, s3
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s17, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s47, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s72, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s92, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-FAKE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: s_or_b32 s0, s0, s1
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s2, s3
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s91, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s90, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s12, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-FAKE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s19, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s89, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s60, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s88, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-FAKE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-FAKE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v112, s0 :: v_dual_mov_b32 v113, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v114, s2 :: v_dual_mov_b32 v115, s3
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s79, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s78, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s30, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s0, s0, s1
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s2, s3
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s21, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s77, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s61, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s76, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-FAKE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: s_or_b32 s0, s0, s1
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s2, s3
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s8, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s75, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s94, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-FAKE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s23, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s9, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s62, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s74, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-FAKE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-FAKE16-NEXT: s_clause 0x1
+; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[97:100], off
+; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[112:115], off offset:16
+; GFX11-FAKE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v97, s0 :: v_dual_mov_b32 v98, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v99, s2 :: v_dual_mov_b32 v100, s3
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s10, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s57, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s34, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s0, s0, s1
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s2, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s11, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s0, s0, s1
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s25, 0xff
+; GFX11-FAKE16-NEXT: s_and_b32 s3, s63, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s56, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s1, s2
+; GFX11-FAKE16-NEXT: s_or_b32 s2, s3, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s1, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s3, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s43, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s1, s2
+; GFX11-FAKE16-NEXT: s_or_b32 s2, s3, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s3, s41, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, vcc_lo, 8
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s15, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s3, s3, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s27, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s13, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s73, 0xff
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v112, s0 :: v_dual_and_b32 v23, 0xff, v23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v113, s1 :: v_dual_lshlrev_b32 v6, 8, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v114, s2 :: v_dual_lshlrev_b32 v11, 8, v11
+; GFX11-FAKE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v115, s3 :: v_dual_and_b32 v96, 0xff, v96
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v23, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v7, 8, v7
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, v96, v11
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v10, 8, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xff, v24
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v14, 8, v14
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v15, 8, v15
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v16, 8, v16
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v8, 8, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v23, v6, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xff, v22
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v21, 8, v87
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xff, v26
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xff, v86
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v6, v7
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 8, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, v11, v21
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, v22, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, v26, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, v24, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xff, v25
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v21, 8, v85
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xff, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xff, v84
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xff, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xff, v28
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v27, 8, v83
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, v14, v21
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, v22, v15
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, v24, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, v25, v16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v21, v26, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v10, 16, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v14, 16, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v21, 16, v21
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v24, v6, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v25, v11, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v26, v13, v14
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v15, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, v16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xff, v32
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v10, 8, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xff, v82
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xff, v31
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v14, 8, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xff, v30
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v16, 8, v81
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xff, v35
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v18, 8, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, v9, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, v11, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, v13, v14
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, v15, v16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, v17, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xff, v80
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xff, v34
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v16, 8, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xff, v33
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v18, 8, v71
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xff, v38
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v20, 8, v70
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xff, v69
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 8, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v14, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, v15, v16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, v17, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, v19, v20
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, v21, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v8, 16, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v15, 16, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, v9, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, v10, v11
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, v13, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, v14, v15
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, v16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v36
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 8, v68
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xff, v37
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v11, 8, v67
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xff, v49
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v17, 8, v66
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xff, v65
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 8, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xff, v39
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v20, 8, v64
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v10, v11
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, v16, v17
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v18, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, v19, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xff, v48
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v17, 8, v55
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xff, v52
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v19, 8, v54
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xff, v53
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xff, v51
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v12, 8, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xff, v50
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, v16, v17
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, v18, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v20, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, v21, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v22, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v18, 16, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v19, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v10, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v11, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, v17, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v12, v5
+; GFX11-FAKE16-NEXT: s_clause 0x5
+; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[97:100], off offset:32
+; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[112:115], off offset:48
+; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[23:26], off offset:64
+; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[6:9], off offset:80
+; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[13:16], off offset:96
+; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[1:4], off offset:112
+; GFX11-FAKE16-NEXT: v_readlane_b32 s104, v41, 8
+; GFX11-FAKE16-NEXT: v_readlane_b32 s103, v41, 7
+; GFX11-FAKE16-NEXT: v_readlane_b32 s102, v41, 6
+; GFX11-FAKE16-NEXT: v_readlane_b32 s101, v41, 5
+; GFX11-FAKE16-NEXT: v_readlane_b32 s100, v41, 4
+; GFX11-FAKE16-NEXT: v_readlane_b32 s99, v41, 3
+; GFX11-FAKE16-NEXT: v_readlane_b32 s98, v41, 2
+; GFX11-FAKE16-NEXT: v_readlane_b32 s97, v41, 1
+; GFX11-FAKE16-NEXT: v_readlane_b32 s96, v41, 0
+; GFX11-FAKE16-NEXT: v_readlane_b32 s87, v40, 31
+; GFX11-FAKE16-NEXT: v_readlane_b32 s86, v40, 30
+; GFX11-FAKE16-NEXT: v_readlane_b32 s85, v40, 29
+; GFX11-FAKE16-NEXT: v_readlane_b32 s84, v40, 28
+; GFX11-FAKE16-NEXT: v_readlane_b32 s83, v40, 27
+; GFX11-FAKE16-NEXT: v_readlane_b32 s82, v40, 26
+; GFX11-FAKE16-NEXT: v_readlane_b32 s81, v40, 25
+; GFX11-FAKE16-NEXT: v_readlane_b32 s80, v40, 24
+; GFX11-FAKE16-NEXT: v_readlane_b32 s71, v40, 23
+; GFX11-FAKE16-NEXT: v_readlane_b32 s70, v40, 22
+; GFX11-FAKE16-NEXT: v_readlane_b32 s69, v40, 21
+; GFX11-FAKE16-NEXT: v_readlane_b32 s68, v40, 20
+; GFX11-FAKE16-NEXT: v_readlane_b32 s67, v40, 19
+; GFX11-FAKE16-NEXT: v_readlane_b32 s66, v40, 18
+; GFX11-FAKE16-NEXT: v_readlane_b32 s65, v40, 17
+; GFX11-FAKE16-NEXT: v_readlane_b32 s64, v40, 16
+; GFX11-FAKE16-NEXT: v_readlane_b32 s55, v40, 15
+; GFX11-FAKE16-NEXT: v_readlane_b32 s54, v40, 14
+; GFX11-FAKE16-NEXT: v_readlane_b32 s53, v40, 13
+; GFX11-FAKE16-NEXT: v_readlane_b32 s52, v40, 12
+; GFX11-FAKE16-NEXT: v_readlane_b32 s51, v40, 11
+; GFX11-FAKE16-NEXT: v_readlane_b32 s50, v40, 10
+; GFX11-FAKE16-NEXT: v_readlane_b32 s49, v40, 9
+; GFX11-FAKE16-NEXT: v_readlane_b32 s48, v40, 8
+; GFX11-FAKE16-NEXT: v_readlane_b32 s39, v40, 7
+; GFX11-FAKE16-NEXT: v_readlane_b32 s38, v40, 6
+; GFX11-FAKE16-NEXT: v_readlane_b32 s37, v40, 5
+; GFX11-FAKE16-NEXT: v_readlane_b32 s36, v40, 4
+; GFX11-FAKE16-NEXT: v_readlane_b32 s35, v40, 3
+; GFX11-FAKE16-NEXT: v_readlane_b32 s34, v40, 2
+; GFX11-FAKE16-NEXT: v_readlane_b32 s31, v40, 1
+; GFX11-FAKE16-NEXT: v_readlane_b32 s30, v40, 0
+; GFX11-FAKE16-NEXT: s_or_saveexec_b32 s0, -1
+; GFX11-FAKE16-NEXT: s_clause 0x3
+; GFX11-FAKE16-NEXT: scratch_load_b32 v40, off, s32
+; GFX11-FAKE16-NEXT: scratch_load_b32 v41, off, s32 offset:4
+; GFX11-FAKE16-NEXT: scratch_load_b32 v42, off, s32 offset:8
+; GFX11-FAKE16-NEXT: scratch_load_b32 v43, off, s32 offset:12
+; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s0
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -180286,9 +185691,10 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s29, 8
; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v5, 0xffff, s5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_and_b32 v1, 0xff, v35
; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
@@ -180304,6 +185710,7 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v32
; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
@@ -180314,201 +185721,169 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s9, s10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v36
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v34
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v33
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v68
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v33
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v64
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v66
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v4, v67
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v65
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v0, 16, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v38
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v6, 16, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v3, 16, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v1, v66
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v37
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v70
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v34
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v65
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v36
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xff, v118
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v67
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v38
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v0, v68
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v1, v69
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v3.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v39
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v2, v70
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v50
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v71
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v48
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v69
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v82
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v7, v80
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v8, v81
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v9, 16, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v55
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v10, 16, v3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v84
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v52
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xff, v54
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v86
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v83
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v48
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v49
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v80
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v3, v82
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v55
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v81
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v2, v71
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v51
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v53
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v52
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v83
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v3, v86
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v1, v84
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xff, v96
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v85
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v10, v97
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v1, 16, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v11, v87
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v99
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v3, 16, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v103
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v114
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v98
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v0, 16, v12
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v96
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v85
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v54
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v98
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v87
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v99
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v2, v97
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v102
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v103
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v0, v101
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v100
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v113
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v101
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xff, v116
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v14, v128
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v114
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v1, v113
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v117
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v112
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v117
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v102
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v13, v130
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xff, v133
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v14, v132
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v0, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v116
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v128
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v16, v134
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v132
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v133
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v3, v130
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v0, v161
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v129
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v147
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v148
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xff, v118
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xff, v129
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v16, v161
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v2, 16, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v166
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v144
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v15, v134
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v18, v147
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v22, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v167
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v21, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v17, 16, v19
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v18, 16, v22
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v0, v166
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v144
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v167
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v151
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v149
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v20, 16, v21
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s8
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v180
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v177
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v0, v180
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v149
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v177
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v165
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v162
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v42
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v41
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v0, v42
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v162
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v41
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v178
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v115
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v45
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v44
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v0, v45
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v115
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v44
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v131
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v119
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v59
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v56
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, v0, v59
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v119
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v56
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v145
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v135
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v60
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v61
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v0, v60
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v135
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v61
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v150
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v146
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v63
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v62
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, v0, v63
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v146
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v62
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v163
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v160
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v73
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v72
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v0, v73
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v160
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v72
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v176
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v164
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v75
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v74
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, v0, v75
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v164
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v74
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v181
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v179
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v77
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v76
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v0, v77
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v179
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v76
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v183
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v182
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v78
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v79
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v0, v78
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v182
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v79
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v43
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v40
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v89
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v88
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v0, v89
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v40
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v88
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v47
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v46
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v91
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v90
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v30, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v0, v91
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v46
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v90
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v58
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v57
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v92
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v93
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v1, 16, v0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, v0, v92
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v57
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v93
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s5
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB93_3
; GFX11-TRUE16-NEXT: .LBB93_2: ; %cmp.true
@@ -180548,57 +185923,59 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s10
; GFX11-TRUE16-NEXT: s_or_b32 s0, s1, s0
; GFX11-TRUE16-NEXT: s_or_b32 s1, s3, s2
+; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s9, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s0, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s1, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s10, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(38)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v57
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(37)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v58
-; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
-; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v57
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(35)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v47
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s8, 0x300
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v46
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v92, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v46
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v93, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v92, v0
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v91, v2
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(33)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v43
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v27, 0x300, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v93, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v90, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v31, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v30, 0x300, v2
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v40
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v31, 0x300, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v28, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v43, 0x300, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v90, v3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(31)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v183
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v182
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v182, 0x300, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v89, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v30, 0x300, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(29)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v181
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v88, v0
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v181, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v29, 0x300, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v78, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v79, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v29, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v181, 0x300, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v179
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v182, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v28, 0x300, v1
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v179, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v77, v3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(27)
@@ -180607,7 +185984,7 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v164
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(25)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v163
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v163, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v27, 0x300, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v76, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
@@ -180618,18 +185995,18 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v74, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v73, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v26, 0x300, v1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(23)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v150
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v26, 0x300, v0
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v150, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v25, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v72, v3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v146
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(21)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v145
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v135
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v25, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v135, 0x300, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v63, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
@@ -180637,13 +186014,13 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(19)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v131
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v62, v0
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v131, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v24, 0x300, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v60, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v61, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v24, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v131, 0x300, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v119
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v135, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 0x300, v1
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v119, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v59, v3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(17)
@@ -180652,29 +186029,29 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v115
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(15)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v165
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v115, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 0x300, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v56, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v162
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v45, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v145, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v115, 0x300, v0
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v44, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v42, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x300, v1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(13)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v151
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x300, v0
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v145, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v41, v3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v149
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(11)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v148
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v144
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v144, 0x300, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v180, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
@@ -180688,8 +186065,8 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v133, 0x300, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v129
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v129, 0x300, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v144, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v129, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v161, v3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v118
@@ -180697,167 +186074,141 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v117
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(5)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v116
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v116, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 0x300, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v147, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v114
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v99
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v134, v1
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v114, 0x300, v0
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v132, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v130, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v103
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v98
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v54
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v53
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x300, v1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v103
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v103, 0x300, v0
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v98
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v128, v3
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v99
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v54
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v39
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v52
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v113, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v53
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v34, 3, v34
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v35, 3, v35
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v33, 3, v33
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x300, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v113, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v128, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v100
-; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v101, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v102, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x300, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v101, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v102, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v96
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v134, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v97, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v55
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v96
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v97, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v55
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v100
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xff, v33
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x300, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v87, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v51
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v86, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v51, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v85, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v84, v6
; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v52
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v50
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v50, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v83, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v48
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v49
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v39
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v39, 0x300, v4
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v87, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v82, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v38
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v81, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v71, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v80, v6
; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v51
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v86, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v85, v6
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v84, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v51, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v50
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v50, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v49
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v83, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v48
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v82, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v9
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v38
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v81, v5
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v38, 0x300, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v71, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v80, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v9
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v37
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v37, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v39, 0x300, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v70, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v36
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v34
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v34, 3, v35
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v35, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v69, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v34
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v112, v3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v68, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v34, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v67, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v66, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v33
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v33, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v32
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v32, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v65, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v8, 0xffff, s4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v37
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x300, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v70, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v36
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v64, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v10, 16, v36
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v7, 16, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v33
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v37
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v22
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v32, 3, v32
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v69, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v34
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xff, v35
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xff, v32
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v112, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v35, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v67, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v68, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v34, v66, v34
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v32, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v4, 16, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v51
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v38
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v34, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v39, 16, v33
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v50, 16, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v9, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v15, 16, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v13, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v3, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v16, 16, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v116
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v129
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v18
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v26, 16, v36
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v114, 16, v32
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v144, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v20, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v21, 16, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v115
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v135
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v131
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v23
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v27
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v145, 16, v32
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v119, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v24, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v25, 16, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v163
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v182
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v181
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v28
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v2, 16, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v36, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v65, v33
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v34
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v36.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v33, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v64, v32
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v3.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v33.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v32, 0x300, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v133, 16, v19
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v160, 16, v32
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v179, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v29, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v30, v30, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v31, 16, v36
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v103.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v114.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v18.h, v129.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v19.h, v133.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v20.h, v144.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v21.h, v145.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v115.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v119.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v24.h, v131.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v25.h, v135.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v26.h, v150.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v27.h, v160.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v28.h, v179.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v29.h, v181.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v30.h, v182.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v43.l
; GFX11-TRUE16-NEXT: .LBB93_3: ; %end
; GFX11-TRUE16-NEXT: s_clause 0x1e
; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:320
@@ -202763,9 +208114,10 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s29, 8
; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v5, 0xffff, s5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_and_b32 v1, 0xff, v35
; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
@@ -202781,6 +208133,7 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v32
; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
@@ -202791,201 +208144,169 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s9, s10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v36
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v34
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v33
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v68
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v33
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v64
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v66
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v4, v67
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v65
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v0, 16, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v38
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v6, 16, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v3, 16, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v1, v66
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v37
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v70
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v34
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v65
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v36
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xff, v118
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v67
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v38
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v0, v68
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v1, v69
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v3.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v39
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v2, v70
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v50
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v71
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v48
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v69
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v82
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v7, v80
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v8, v81
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v9, 16, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v55
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v10, 16, v3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v84
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v52
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xff, v54
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v86
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v83
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v48
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v49
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v80
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v3, v82
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v55
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v81
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v2, v71
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v51
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v53
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v52
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v83
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v3, v86
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v1, v84
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xff, v96
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v85
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v10, v97
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v1, 16, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v11, v87
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v99
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v3, 16, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v103
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v114
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v98
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v0, 16, v12
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v96
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v85
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v54
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v98
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v87
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v99
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v2, v97
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v102
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v103
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v0, v101
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v100
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v113
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v101
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xff, v116
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v14, v128
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v114
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v1, v113
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v117
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v112
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v117
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v102
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v13, v130
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xff, v133
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v14, v132
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v0, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v116
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v128
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v16, v134
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v132
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v133
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v3, v130
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v0, v161
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v129
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v147
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v148
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xff, v118
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xff, v129
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v16, v161
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v2, 16, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v166
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v144
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v15, v134
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v18, v147
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v22, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v167
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v21, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v17, 16, v19
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v18, 16, v22
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v0, v166
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v144
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v167
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v151
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v149
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v20, 16, v21
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s8
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v180
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v177
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v0, v180
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v149
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v177
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v165
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v162
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v42
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v41
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v0, v42
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v162
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v41
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v178
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v115
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v45
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v44
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v0, v45
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v115
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v44
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v131
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v119
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v59
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v56
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, v0, v59
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v119
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v56
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v145
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v135
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v60
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v61
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v0, v60
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v135
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v61
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v150
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v146
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v63
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v62
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, v0, v63
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v146
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v62
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v163
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v160
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v73
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v72
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v0, v73
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v160
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v72
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v176
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v164
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v75
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v74
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, v0, v75
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v164
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v74
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v181
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v179
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v77
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v76
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v0, v77
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v179
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v76
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v183
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v182
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v78
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v79
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v0, v78
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v182
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v79
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v43
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v40
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v89
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v88
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v0, v89
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v40
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v88
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v47
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v46
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v91
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v90
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v30, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v0, v91
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v46
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v90
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v58
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v57
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v92
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v93
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v1, 16, v0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, v0, v92
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v57
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v93
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s5
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB97_3
; GFX11-TRUE16-NEXT: .LBB97_2: ; %cmp.true
@@ -203025,57 +208346,59 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s10
; GFX11-TRUE16-NEXT: s_or_b32 s0, s1, s0
; GFX11-TRUE16-NEXT: s_or_b32 s1, s3, s2
+; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s9, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s0, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s1, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s10, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(38)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v57
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(37)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v58
-; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
-; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v57
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(35)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v47
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s8, 0x300
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v46
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v92, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v46
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v93, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v92, v0
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v91, v2
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(33)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v43
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v27, 0x300, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v93, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v90, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v31, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v30, 0x300, v2
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v40
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v31, 0x300, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v28, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v43, 0x300, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v90, v3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(31)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v183
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v182
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v182, 0x300, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v89, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v30, 0x300, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(29)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v181
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v88, v0
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v181, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v29, 0x300, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v78, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v79, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v29, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v181, 0x300, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v179
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v182, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v28, 0x300, v1
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v179, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v77, v3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(27)
@@ -203084,7 +208407,7 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v164
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(25)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v163
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v163, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v27, 0x300, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v76, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
@@ -203095,18 +208418,18 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v74, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v73, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v26, 0x300, v1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(23)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v150
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v26, 0x300, v0
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v150, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v25, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v72, v3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v146
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(21)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v145
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v135
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v25, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v135, 0x300, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v63, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
@@ -203114,13 +208437,13 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(19)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v131
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v62, v0
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v131, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v24, 0x300, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v60, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v61, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v24, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v131, 0x300, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v119
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v135, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 0x300, v1
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v119, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v59, v3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(17)
@@ -203129,29 +208452,29 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v115
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(15)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v165
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v115, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 0x300, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v56, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v162
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v45, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v145, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v115, 0x300, v0
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v44, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v42, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x300, v1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(13)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v151
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x300, v0
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v145, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v41, v3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v149
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(11)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v148
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v144
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v144, 0x300, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v180, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
@@ -203165,8 +208488,8 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v133, 0x300, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v129
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v129, 0x300, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v144, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v129, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v161, v3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v118
@@ -203174,167 +208497,141 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v117
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(5)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v116
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v116, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 0x300, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v147, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v114
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v99
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v134, v1
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v114, 0x300, v0
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v132, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v130, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v103
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v98
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v54
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v53
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x300, v1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v103
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v103, 0x300, v0
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v98
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v128, v3
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v99
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v54
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v39
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v52
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v113, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v53
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v34, 3, v34
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v35, 3, v35
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v33, 3, v33
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x300, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v113, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v128, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v100
-; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v101, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v102, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x300, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v101, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v102, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v96
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v134, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v97, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v55
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v96
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v97, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v55
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v100
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xff, v33
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x300, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v87, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v51
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v86, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v51, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v85, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v84, v6
; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v52
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v50
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v50, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v83, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v48
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v49
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v39
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v39, 0x300, v4
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v87, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v82, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v38
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v81, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v71, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v80, v6
; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v51
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v86, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v85, v6
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v84, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v51, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v50
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v50, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v49
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v83, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v48
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v82, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v9
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v38
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v81, v5
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v38, 0x300, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v71, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v80, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v9
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v37
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v37, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v39, 0x300, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v70, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v36
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v34
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v34, 3, v35
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v35, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v69, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v34
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v112, v3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v68, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v34, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v67, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v66, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v33
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v33, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v32
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v32, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v65, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v8, 0xffff, s4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v37
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x300, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v70, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v36
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v64, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v10, 16, v36
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v7, 16, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v33
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v37
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v22
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v32, 3, v32
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v69, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v34
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xff, v35
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xff, v32
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v112, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v35, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v67, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v68, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v34, v66, v34
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v32, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v4, 16, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v51
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v38
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v34, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v39, 16, v33
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v50, 16, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v9, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v15, 16, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v13, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v3, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v16, 16, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v116
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v129
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v18
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v26, 16, v36
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v114, 16, v32
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v144, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v20, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v21, 16, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v115
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v135
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v131
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v23
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v27
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v145, 16, v32
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v119, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v24, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v25, 16, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v163
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v182
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v181
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v28
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v2, 16, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v36, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v65, v33
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v34
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v36.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v33, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v64, v32
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v3.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v33.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v32, 0x300, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v133, 16, v19
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v160, 16, v32
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v179, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v29, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v30, v30, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v31, 16, v36
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v103.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v114.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v18.h, v129.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v19.h, v133.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v20.h, v144.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v21.h, v145.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v115.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v119.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v24.h, v131.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v25.h, v135.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v26.h, v150.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v27.h, v160.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v28.h, v179.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v29.h, v181.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v30.h, v182.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v43.l
; GFX11-TRUE16-NEXT: .LBB97_3: ; %end
; GFX11-TRUE16-NEXT: s_clause 0x1e
; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:320
@@ -221009,700 +226306,1362 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v64bf16_to_v64f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v30, v12
-; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
-; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v26, v8
-; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v24, v6
-; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
-; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
-; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s15, s3
-; GFX11-NEXT: s_mov_b32 s14, s2
-; GFX11-NEXT: s_mov_b32 s13, s1
-; GFX11-NEXT: s_mov_b32 s12, s0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB101_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB101_4
-; GFX11-NEXT: .LBB101_2: ; %cmp.true
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v17
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v16
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff0000, v18
-; GFX11-NEXT: s_and_b32 s0, s12, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_add_f32 v1, 0x40c00000, v1
-; GFX11-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v6, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_add_f32 v0, 0x40c00000, v0 :: v_dual_lshlrev_b32 v3, 16, v17
-; GFX11-NEXT: v_bfe_u32 v5, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_dual_add_f32 v3, 0x40c00000, v3 :: v_dual_cndmask_b32 v0, v5, v8
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_bfe_u32 v11, v3, 16, 1
-; GFX11-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v6, v9, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v7, v10, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-NEXT: v_lshrrev_b32_e32 v17, 16, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v5, v4
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v11, v3
-; GFX11-NEXT: v_lshl_or_b32 v16, v16, 16, v32
-; GFX11-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v3, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v4, v5
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff0000, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v20
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v19
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v2, v4, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v2, v4
-; GFX11-NEXT: v_dual_add_f32 v2, 0x40c00000, v6 :: v_dual_add_f32 v3, 0x40c00000, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v1
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v8, v3
-; GFX11-NEXT: v_bfe_u32 v8, v2, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v3
-; GFX11-NEXT: v_add_f32_e32 v3, 0x40c00000, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v1, v5 :: v_dual_and_b32 v6, 0xffff0000, v22
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v8, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_add_f32 v6, 0x40c00000, v6 :: v_dual_lshlrev_b32 v5, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v1, v3, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v7, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v3
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v3
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v0, v0, v4 :: v_dual_add_nc_u32 v1, 0x7fff, v1
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_f32_e32 v3, 0x40c00000, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v0
-; GFX11-NEXT: v_bfe_u32 v0, v6, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v23
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_add_nc_u32 v2, v4, v5
-; GFX11-NEXT: v_bfe_u32 v4, v3, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v0, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_add_f32_e32 v5, 0x40c00000, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_and_b32 v36, 0xffff, v36
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v23
-; GFX11-NEXT: v_lshl_or_b32 v19, v19, 16, v34
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
-; GFX11-NEXT: v_bfe_u32 v1, v5, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v2, v4 :: v_dual_add_nc_u32 v0, v1, v5
-; GFX11-NEXT: v_add_f32_e32 v2, 0x40c00000, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v5
-; GFX11-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v1
-; GFX11-NEXT: v_bfe_u32 v1, v3, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v2
-; GFX11-NEXT: v_dual_cndmask_b32 v0, v0, v6 :: v_dual_and_b32 v7, 0xffff0000, v25
-; GFX11-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v6, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v0, v0, v5 :: v_dual_add_nc_u32 v5, 0x7fff, v6
-; GFX11-NEXT: v_add_f32_e32 v6, 0x40c00000, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v3, v6, 16, 1
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_lshlrev_b32 v2, 16, v25
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: v_add_f32_e32 v0, 0x40c00000, v2
-; GFX11-NEXT: v_dual_cndmask_b32 v4, v5, v7 :: v_dual_add_nc_u32 v1, v3, v6
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v26
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v26
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_add_nc_u32 v3, v3, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v0
-; GFX11-NEXT: v_and_b32_e32 v39, 0xffff, v39
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v7, v4, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v5, v2
-; GFX11-NEXT: v_dual_cndmask_b32 v0, v3, v6 :: v_dual_and_b32 v5, 0xffff0000, v27
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v27
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v7, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v2
-; GFX11-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_f32_e32 v6, 0x40c00000, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_bfe_u32 v4, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v2, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v4, v6
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v28
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_lshlrev_b32 v3, 16, v28
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_and_b32_e32 v49, 0xffff, v49
-; GFX11-NEXT: v_dual_add_f32 v3, 0x40c00000, v3 :: v_dual_cndmask_b32 v0, v0, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_bfe_u32 v4, v2, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v4, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff0000, v29
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v5, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v29
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_dual_add_f32 v4, 0x40c00000, v4 :: v_dual_add_f32 v5, 0x40c00000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v8, v4, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v8, v4
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v2, v5
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v30
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v7, 16, v30
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_f32_e32 v4, 0x40c00000, v7
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v31
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v0, v0, v6 :: v_dual_add_f32 v1, 0x40c00000, v5
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v0
-; GFX11-NEXT: v_bfe_u32 v0, v4, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_bfe_u32 v7, v1, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v0, v4
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v3, v5 :: v_dual_add_f32 v3, 0x40c00000, v6
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v7, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_bfe_u32 v2, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v6
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s12, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v0
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s13, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v0
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v4, v6
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: s_lshl_b32 s0, s13, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v1
-; GFX11-NEXT: v_bfe_u32 v1, v4, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s14, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v6 :: v_dual_add_nc_u32 v5, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v4
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: s_lshl_b32 s0, s14, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_and_b32 s0, s15, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v8, v7
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v7
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v5, v9, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: s_lshl_b32 s0, s15, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v9
-; GFX11-NEXT: v_bfe_u32 v10, v6, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v4
-; GFX11-NEXT: v_bfe_u32 v4, v7, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: s_and_b32 s0, s16, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v5, v5, v8 :: v_dual_add_nc_u32 v8, 0x7fff, v10
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v5
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v9, vcc_lo
-; GFX11-NEXT: s_lshl_b32 s0, s16, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v7
-; GFX11-NEXT: v_bfe_u32 v9, v5, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v6
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: s_and_b32 s0, s17, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v9, v5
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s17, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v10, v9, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v66, 0xffff, v66
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v6
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v9
-; GFX11-NEXT: s_and_b32 s0, s18, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v4, v4, v7 :: v_dual_add_nc_u32 v7, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_bfe_u32 v11, v5, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v67, 0xffff, v67
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v54, 0xffff, v54
-; GFX11-NEXT: v_dual_cndmask_b32 v6, v7, v8 :: v_dual_add_nc_u32 v7, 0x7fff, v10
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v9
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v11, v5
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s18, 16
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v7, v7, v8 :: v_dual_add_nc_u32 v8, 0x7fff, v10
-; GFX11-NEXT: v_bfe_u32 v10, v11, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v7
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s19, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v5, v8, v9 :: v_dual_add_nc_u32 v8, v10, v11
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v9, v7, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: s_lshl_b32 s0, s19, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
-; GFX11-NEXT: v_bfe_u32 v13, v10, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v9, v7
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s20, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v8, v8, v12, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v13, v13, v10
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v8
-; GFX11-NEXT: v_bfe_u32 v8, v11, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v9, v12, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v13
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v10
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v11
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s20, 16
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v9, v12, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
-; GFX11-NEXT: v_bfe_u32 v12, v13, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v9
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s21, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v12, v13
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v11, v9, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: s_lshl_b32 s0, s21, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_bfe_u32 v15, v12, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v11, v9
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s22, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v10, v10, v14, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v15, v15, v12
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v10
-; GFX11-NEXT: v_bfe_u32 v10, v13, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v11, v14, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v15
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v12
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v9
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s22, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v11, v11, v14, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v13
-; GFX11-NEXT: v_bfe_u32 v14, v9, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v11
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s23, 0xffff0000
-; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v9
-; GFX11-NEXT: v_cndmask_b32_e32 v10, v10, v12, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v14, v9
-; GFX11-NEXT: v_bfe_u32 v13, v11, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_lshl_b32 s0, s23, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v13, v13, v11
-; GFX11-NEXT: v_bfe_u32 v82, v14, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v12, v15, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
-; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v82, v82, v14
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_bfe_u32 v83, v12, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s24, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v5, v68, 16, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v11, v13, v15, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v82
-; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v14
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v82, v83, v12
-; GFX11-NEXT: v_add_f32_e64 v83, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s24, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v13, v13, v15 :: v_dual_add_nc_u32 v14, 0x7fff, v82
-; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v12
-; GFX11-NEXT: v_bfe_u32 v82, v83, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v84, 16, v13
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s25, 0xffff0000
-; GFX11-NEXT: v_or_b32_e32 v85, 0x400000, v83
-; GFX11-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v82, v83
-; GFX11-NEXT: v_bfe_u32 v15, v13, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v82, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v83, v83
-; GFX11-NEXT: s_lshl_b32 s0, s25, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v15, v15, v13
-; GFX11-NEXT: v_bfe_u32 v86, v82, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v83, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s26, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v14, v14, v85 :: v_dual_add_nc_u32 v15, 0x7fff, v15
-; GFX11-NEXT: v_or_b32_e32 v85, 0x400000, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v86, v86, v82
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v87, 16, v14
-; GFX11-NEXT: v_bfe_u32 v14, v83, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v100, 0x400000, v83
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v15, v85, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v86
-; GFX11-NEXT: v_add_f32_e64 v86, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s26, 16
-; GFX11-NEXT: v_or_b32_e32 v85, 0x400000, v82
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v82, v82
-; GFX11-NEXT: v_add_f32_e64 v82, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s27, 16
-; GFX11-NEXT: v_or_b32_e32 v102, 0x400000, v86
-; GFX11-NEXT: v_add_f32_e64 v96, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s27, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v97, v82, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v15, v15, v85, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v85, v86, 16, 1
-; GFX11-NEXT: v_bfe_u32 v99, v96, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v98, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v97, v97, v82
-; GFX11-NEXT: v_or_b32_e32 v103, 0x400000, v82
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v82, v82
-; GFX11-NEXT: v_add_nc_u32_e32 v99, v99, v96
-; GFX11-NEXT: v_add_nc_u32_e32 v85, v85, v86
-; GFX11-NEXT: v_add_nc_u32_e32 v97, 0x7fff, v97
-; GFX11-NEXT: v_bfe_u32 v101, v98, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v112, 0x400000, v96
-; GFX11-NEXT: v_add_nc_u32_e32 v99, 0x7fff, v99
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v82, v97, v103 :: v_dual_add_nc_u32 v85, 0x7fff, v85
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v96, v96
-; GFX11-NEXT: v_add_nc_u32_e32 v101, v101, v98
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v14, v83
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v82
-; GFX11-NEXT: v_cndmask_b32_e32 v96, v99, v112, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v86, v86
-; GFX11-NEXT: v_add_nc_u32_e32 v97, 0x7fff, v101
-; GFX11-NEXT: v_or_b32_e32 v101, 0x400000, v98
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v96, 16, v96
-; GFX11-NEXT: v_cndmask_b32_e32 v85, v85, v102, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v98, v98
-; GFX11-NEXT: v_and_b32_e32 v82, 0xffff, v82
-; GFX11-NEXT: v_and_b32_e32 v68, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v3, v65, 16, v67
-; GFX11-NEXT: v_dual_cndmask_b32 v86, v97, v101 :: v_dual_and_b32 v65, 0xffff, v28
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v83, v83
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v85
-; GFX11-NEXT: v_lshrrev_b32_e32 v97, 16, v15
-; GFX11-NEXT: v_and_b32_e32 v80, 0xffff, v80
-; GFX11-NEXT: v_lshrrev_b32_e32 v85, 16, v86
-; GFX11-NEXT: v_cndmask_b32_e32 v14, v14, v100, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v86, 0xffff, v96
-; GFX11-NEXT: v_lshl_or_b32 v1, v1, 16, v68
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v96, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v14, v83, 16, v82
-; GFX11-NEXT: v_lshl_or_b32 v15, v85, 16, v86
-; GFX11-NEXT: v_and_b32_e32 v83, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v86, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v82, 0xffff, v96
-; GFX11-NEXT: v_and_b32_e32 v96, 0xffff, v10
-; GFX11-NEXT: v_and_b32_e32 v85, 0xffff, v12
-; GFX11-NEXT: v_lshl_or_b32 v12, v87, 16, v83
-; GFX11-NEXT: v_lshl_or_b32 v10, v9, 16, v86
-; GFX11-NEXT: v_lshl_or_b32 v13, v97, 16, v82
-; GFX11-NEXT: v_and_b32_e32 v82, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v9, v81, 16, v96
-; GFX11-NEXT: v_and_b32_e32 v81, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v83, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v11, v84, 16, v85
-; GFX11-NEXT: v_lshl_or_b32 v6, v69, 16, v82
-; GFX11-NEXT: v_and_b32_e32 v69, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v2, v64, 16, v66
-; GFX11-NEXT: v_and_b32_e32 v64, 0xffff, v29
-; GFX11-NEXT: v_lshl_or_b32 v7, v70, 16, v81
-; GFX11-NEXT: v_and_b32_e32 v70, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v0, v55, 16, v69
-; GFX11-NEXT: v_and_b32_e32 v55, 0xffff, v30
-; GFX11-NEXT: v_lshl_or_b32 v28, v51, 16, v64
-; GFX11-NEXT: v_and_b32_e32 v51, 0xffff, v24
-; GFX11-NEXT: v_and_b32_e32 v66, 0xffff, v27
-; GFX11-NEXT: v_lshl_or_b32 v27, v50, 16, v65
-; GFX11-NEXT: v_lshl_or_b32 v29, v52, 16, v55
-; GFX11-NEXT: v_and_b32_e32 v50, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v52, 0xffff, v22
-; GFX11-NEXT: v_lshl_or_b32 v24, v38, 16, v39
-; GFX11-NEXT: v_lshl_or_b32 v22, v37, 16, v51
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff, v20
-; GFX11-NEXT: v_and_b32_e32 v38, 0xffff, v18
-; GFX11-NEXT: v_lshl_or_b32 v8, v71, 16, v80
-; GFX11-NEXT: v_lshl_or_b32 v4, v4, 16, v83
-; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v70
-; GFX11-NEXT: v_lshl_or_b32 v30, v53, 16, v54
-; GFX11-NEXT: v_lshl_or_b32 v26, v26, 16, v66
-; GFX11-NEXT: v_lshl_or_b32 v25, v48, 16, v49
-; GFX11-NEXT: v_lshl_or_b32 v23, v23, 16, v50
-; GFX11-NEXT: v_lshl_or_b32 v21, v21, 16, v52
-; GFX11-NEXT: v_lshl_or_b32 v20, v35, 16, v36
-; GFX11-NEXT: v_lshl_or_b32 v18, v33, 16, v37
-; GFX11-NEXT: v_lshl_or_b32 v17, v17, 16, v38
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB101_3:
-; GFX11-NEXT: s_branch .LBB101_2
-; GFX11-NEXT: .LBB101_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
-; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v64bf16_to_v64f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v30, v12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v26, v8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v24, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: s_mov_b32 s15, s3
+; GFX11-TRUE16-NEXT: s_mov_b32 s14, s2
+; GFX11-TRUE16-NEXT: s_mov_b32 s13, s1
+; GFX11-TRUE16-NEXT: s_mov_b32 s12, s0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB101_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB101_4
+; GFX11-TRUE16-NEXT: .LBB101_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v17
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v16
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v18
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s25, 0xffff0000
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s12, 0xffff0000
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_add_f32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v85, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s27, 0xffff0000
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v97, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v87, 0x400000, v85
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v98, v97, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v16
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v98, v98, v97
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v98, 0x7fff, v98
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 0x40c00000, v3 :: v_dual_cndmask_b32 v0, v5, v8
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v6, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v16, 16, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v32.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v18
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v11, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v17, 16, v0
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v3, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v33.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v4, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v20
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v19
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v34.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v2, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v6 :: v_dual_add_f32 v3, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v8, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v2, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v5 :: v_dual_and_b32 v6, 0xffff0000, v22
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v8, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v6, 0x40c00000, v6 :: v_dual_lshlrev_b32 v5, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v3, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v35.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v0, v4 :: v_dual_add_nc_u32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v23
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_add_nc_u32 v2, v4, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v3, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v36.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v23
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v1, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v37.l
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v2, v4 :: v_dual_add_f32 v2, 0x40c00000, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v24
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v7, v2
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v0, v6 :: v_dual_and_b32 v7, 0xffff0000, v25
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v38.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v0, v5 :: v_dual_add_nc_u32 v5, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v7
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v39.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_lshlrev_b32 v2, 16, v25
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v2
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v4, v5, v7 :: v_dual_add_nc_u32 v1, v3, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v26
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v26
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_add_nc_u32 v3, v3, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v48.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v2
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v3, v6 :: v_dual_and_b32 v5, 0xffff0000, v27
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v27
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v7, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v2, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v4, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v28
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_lshlrev_b32 v3, 16, v28
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v50.l
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v49.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v4, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v29
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v29
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v51.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 0x40c00000, v5 :: v_dual_cndmask_b32 v0, v0, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v8, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v2, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v30
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v30
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v7
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v52.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v31
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v31
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v53.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v4
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v2, v3, v5 :: v_dual_add_f32 v3, 0x40c00000, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v7, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s12, 16
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v4, v5 :: v_dual_add_nc_u32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v54.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v5, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s13, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v55.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s14, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v0, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v64.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v5, v7
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s15, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v3, v3, v7 :: v_dual_add_nc_u32 v6, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v2, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s16, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v3, v4, v6 :: v_dual_add_nc_u32 v4, v7, v9
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, v10, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v7, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v5, v5, v9 :: v_dual_add_nc_u32 v8, 0x7fff, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, v4, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s18, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v10
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v11, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v10, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v68.l
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v5, v6, v8 :: v_dual_add_nc_u32 v6, v9, v11
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v10
+; GFX11-TRUE16-NEXT: v_bfe_u32 v12, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v6, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, v12, v8
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v9, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v7, v7, v11 :: v_dual_add_nc_u32 v10, 0x7fff, v12
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, v6, v9
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v7
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s20, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v12
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v7, v10, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v13, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v70.l
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v7, v8, v10 :: v_dual_add_nc_u32 v8, v11, v13
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s21, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v9, v12
+; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v10, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v8, v8, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, v14, v10
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v11, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v9, v9, v13 :: v_dual_add_nc_u32 v12, 0x7fff, v14
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v10
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, v8, v11
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v9
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s22, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v14
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v9, v12, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_bfe_u32 v13, v15, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v14, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v80.l
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v9, v10, v12 :: v_dual_add_nc_u32 v10, v13, v15
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v15
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s23, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v11, v14
+; GFX11-TRUE16-NEXT: v_bfe_u32 v82, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v10, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v82, v82, v12
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v10
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v13, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v11, v11, v15 :: v_dual_add_nc_u32 v14, 0x7fff, v82
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v12
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v82, v10, v13
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v84, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s24, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v82
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v82, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v11, v14, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_bfe_u32 v15, v84, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s25, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 16, v11
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v83.l
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v12, v12, v14 :: v_dual_add_nc_u32 v13, v15, v84
+; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v82, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v15, v85, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v84, v84
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v81.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v11, 16, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v84
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, v14, v82
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, v15, v85
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v84, 0x400000, v82
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v86.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v82, v82
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
+; GFX11-TRUE16-NEXT: v_bfe_u32 v96, v13, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v82, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v14, v14, v84, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v85, v85
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s26, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v84, v96, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v12, 16, v14
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v15, v15, v87, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v87, v82, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v84
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v99, 0x400000, v82
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 16, v15
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v84, v87, v82
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v87, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s27, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v85.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v13, v14, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v15, v87, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v84, 0x7fff, v84
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v82, v82
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v13, 16, v13
+; GFX11-TRUE16-NEXT: v_bfe_u32 v100, v14, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, v15, v87
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v101, 0x400000, v14
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v82, v84, v99, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v99, 0x400000, v97
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v97, v97
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v84, v100, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v100, 0x400000, v87
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v82
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v97, v98, v99, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v87, v87
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v84, 0x7fff, v84
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v96.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v71.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 16, v97
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v15, v15, v100, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v69.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v66.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v15
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v84, v84, v101, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v82.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v65.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v84
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v87.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB101_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB101_2
+; GFX11-TRUE16-NEXT: .LBB101_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v64bf16_to_v64f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v30, v12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v26, v8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v24, v6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s15, s3
+; GFX11-FAKE16-NEXT: s_mov_b32 s14, s2
+; GFX11-FAKE16-NEXT: s_mov_b32 s13, s1
+; GFX11-FAKE16-NEXT: s_mov_b32 s12, s0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB101_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB101_4
+; GFX11-FAKE16-NEXT: .LBB101_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v17
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v18
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s12, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_add_f32 v1, 0x40c00000, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v6, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v0, 0x40c00000, v0 :: v_dual_lshlrev_b32 v3, 16, v17
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 0x40c00000, v3 :: v_dual_cndmask_b32 v0, v5, v8
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v6, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v17, 16, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v5, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v11, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v16, 16, v32
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v3, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v4, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v20
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v19
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v2, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v6 :: v_dual_add_f32 v3, 0x40c00000, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v8, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v2, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v1, v5 :: v_dual_and_b32 v6, 0xffff0000, v22
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v8, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v6, 0x40c00000, v6 :: v_dual_lshlrev_b32 v5, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v3, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v0, v0, v4 :: v_dual_add_nc_u32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v0, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v23
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_add_nc_u32 v2, v4, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v3, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v0, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_and_b32 v36, 0xffff, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v19, 16, v34
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v2, v4 :: v_dual_add_nc_u32 v0, v1, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v2
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v0, v0, v6 :: v_dual_and_b32 v7, 0xffff0000, v25
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v6, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v0, v0, v5 :: v_dual_add_nc_u32 v5, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_lshlrev_b32 v2, 16, v25
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v2
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v4, v5, v7 :: v_dual_add_nc_u32 v1, v3, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v26
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v26
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_add_nc_u32 v3, v3, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v39, 0xffff, v39
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v5, v2
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v0, v3, v6 :: v_dual_and_b32 v5, 0xffff0000, v27
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v27
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v7, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v2, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v4, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v28
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_lshlrev_b32 v3, 16, v28
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v49, 0xffff, v49
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 0x40c00000, v3 :: v_dual_cndmask_b32 v0, v0, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v2, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v4, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v29
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v5, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v29
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v4, 0x40c00000, v4 :: v_dual_add_f32 v5, 0x40c00000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v8, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v2, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v30
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v30
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v7
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v31
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v0, v0, v6 :: v_dual_add_f32 v1, 0x40c00000, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v0, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v0, v4
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v3, v5 :: v_dual_add_f32 v3, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v7, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s12, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v4, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s13, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v4, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s14, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v2, v6 :: v_dual_add_nc_u32 v5, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v8, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s15, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v9
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v5, v5, v8 :: v_dual_add_nc_u32 v8, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v9, vcc_lo
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s16, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v9, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v66, 0xffff, v66
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v9
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v4, v4, v7 :: v_dual_add_nc_u32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v67, 0xffff, v67
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v54, 0xffff, v54
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v6, v7, v8 :: v_dual_add_nc_u32 v7, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v11, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s18, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v7, v7, v8 :: v_dual_add_nc_u32 v8, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v5, v8, v9 :: v_dual_add_nc_u32 v8, v10, v11
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v13, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v9, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v8, v8, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, v13, v10
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v9, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v11
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s20, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v9, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v12, v13
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s21, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v15, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v11, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, v15, v12
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v11, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s22, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v11, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_bfe_u32 v14, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v11
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v14, v9
+; GFX11-FAKE16-NEXT: v_bfe_u32 v13, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s23, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, v13, v11
+; GFX11-FAKE16-NEXT: v_bfe_u32 v82, v14, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v12, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v82, v82, v14
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: v_bfe_u32 v83, v12, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v68, 16, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v13, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v82
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v14
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v82, v83, v12
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v83, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s24, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v13, v13, v15 :: v_dual_add_nc_u32 v14, 0x7fff, v82
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_bfe_u32 v82, v83, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v84, 16, v13
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s25, 0xffff0000
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v85, 0x400000, v83
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v82, v83
+; GFX11-FAKE16-NEXT: v_bfe_u32 v15, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v82, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v83, v83
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s25, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, v15, v13
+; GFX11-FAKE16-NEXT: v_bfe_u32 v86, v82, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v83, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v14, v14, v85 :: v_dual_add_nc_u32 v15, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v85, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v86, v86, v82
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v87, 16, v14
+; GFX11-FAKE16-NEXT: v_bfe_u32 v14, v83, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v100, 0x400000, v83
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v15, v85, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v86
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v86, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s26, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v85, 0x400000, v82
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v82, v82
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v82, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s27, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v102, 0x400000, v86
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v96, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s27, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v97, v82, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v15, v15, v85, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v85, v86, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v99, v96, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v98, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v97, v97, v82
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v103, 0x400000, v82
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v82, v82
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v99, v99, v96
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v85, v85, v86
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v97, 0x7fff, v97
+; GFX11-FAKE16-NEXT: v_bfe_u32 v101, v98, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v112, 0x400000, v96
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v99, 0x7fff, v99
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v82, v97, v103 :: v_dual_add_nc_u32 v85, 0x7fff, v85
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v96, v96
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v101, v101, v98
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v14, v83
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v82
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v96, v99, v112, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v86, v86
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v97, 0x7fff, v101
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v101, 0x400000, v98
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v96, 16, v96
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v85, v85, v102, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v98, v98
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v82, 0xffff, v82
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v68, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v65, 16, v67
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v86, v97, v101 :: v_dual_and_b32 v65, 0xffff, v28
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v83, v83
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v85
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v97, 16, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v80, 0xffff, v80
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v85, 16, v86
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v14, v14, v100, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v86, 0xffff, v96
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v1, 16, v68
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v96, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v83, 16, v82
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v85, 16, v86
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v83, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v86, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v82, 0xffff, v96
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v96, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v85, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v87, 16, v83
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v9, 16, v86
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v97, 16, v82
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v82, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v81, 16, v96
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v81, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v83, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v84, 16, v85
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v69, 16, v82
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v69, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v64, 16, v66
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v64, 0xffff, v29
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v70, 16, v81
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v70, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v55, 16, v69
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v55, 0xffff, v30
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v51, 16, v64
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v51, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v66, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v50, 16, v65
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v52, 16, v55
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v50, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v52, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v38, 16, v39
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v37, 16, v51
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v71, 16, v80
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v4, 16, v83
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v31, 16, v70
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v53, 16, v54
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v26, 16, v66
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v48, 16, v49
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v23, 16, v50
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v21, 16, v52
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v35, 16, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v33, 16, v37
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v17, 16, v38
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB101_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB101_2
+; GFX11-FAKE16-NEXT: .LBB101_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -227079,568 +233038,496 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v33, 16, v17
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v32, 16, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff0000, v20
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v51, 16, v23
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v50, 16, v22
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v33, 0x40c00000, v33 :: v_dual_add_f32 v32, 0x40c00000, v32
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v34, 0x40c00000, v16 :: v_dual_lshlrev_b32 v35, 16, v18
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff0000, v24
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v37, v33, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v16, v32, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v38, 0x400000, v32
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v33, 0x40c00000, v33 :: v_dual_lshlrev_b32 v52, 16, v24
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v32, 0x40c00000, v32
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v22
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v70, 16, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v38, v33, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v35, v32, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v36, 0x400000, v32
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v39, 0x400000, v34
-; GFX11-TRUE16-NEXT: v_add3_u32 v37, v37, v33, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v16, v16, v32, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v48, 0x400000, v33
+; GFX11-TRUE16-NEXT: v_add3_u32 v38, v38, v33, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v35, v35, v32, 0x7fff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v32, 0x400000, v33
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff0000, v30
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_cndmask_b32 v16, v16, v38
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v17
-; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v34, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff0000, v18
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v68, 0xffff0000, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v70, 0xffff0000, v2
-; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v34, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v34, v36, 16, 1
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v80, 16, v5
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v82, 16, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v17, v17, v39, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v24, 0x40c00000, v24
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v54, 16, v26
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v32, v35, v36, vcc_lo
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 0x40c00000, v17 :: v_dual_lshlrev_b32 v34, 16, v18
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v36, 16, v19
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v34, 0x40c00000, v34
+; GFX11-TRUE16-NEXT: v_bfe_u32 v35, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v26, 0x40c00000, v26
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v80, 16, v4
+; GFX11-TRUE16-NEXT: v_add3_u32 v35, v35, v17, 0x7fff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 0x40c00000, v19 :: v_dual_lshlrev_b32 v82, 16, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v64, 16, v28
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v16, 0x40c00000, v16
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v84, 16, v8
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v66, 16, v30
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v28, 0x40c00000, v28
+; GFX11-TRUE16-NEXT: v_bfe_u32 v37, v16, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v39, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v30, 0x40c00000, v30
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v86, 16, v10
+; GFX11-TRUE16-NEXT: v_add3_u32 v37, v37, v16, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v68, 16, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v96, 16, v12
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v16, v37, v39, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v33, 0x400000, v36
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v82, 0x40c00000, v82 :: v_dual_lshlrev_b32 v83, 16, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v37, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_bfe_u32 v39, v36, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_cndmask_b32 v33, v38, v48
+; GFX11-TRUE16-NEXT: v_bfe_u32 v38, v34, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v48, 16, v20
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v17, v35, v37 :: v_dual_and_b32 v6, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v35, v38, v34, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v37, 0x400000, v34
+; GFX11-TRUE16-NEXT: v_bfe_u32 v38, v18, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v6
; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v18, v37, v32 :: v_dual_add_f32 v37, 0x40c00000, v38
-; GFX11-TRUE16-NEXT: v_add3_u32 v32, v34, v36, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v34, v35, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v34, v35, v37, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v35, v38, v18, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v37, 0x400000, v18
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-TRUE16-NEXT: v_add3_u32 v38, v39, v36, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v39, 0x400000, v36
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v8, 0x40c00000, v8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v18, v35, v37, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v37, v19, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff0000, v19
-; GFX11-TRUE16-NEXT: v_bfe_u32 v36, v37, 16, 1
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v83, 0x40c00000, v83 :: v_dual_add_f32 v8, 0x40c00000, v8
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v32, v32, v33, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v33, v34, v35, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v34, 0x400000, v35
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v38, 16, v19
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v84, 16, v9
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v86, 16, v11
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v19, v33, v34, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v48
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v10, 0x40c00000, v10
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX11-TRUE16-NEXT: v_add3_u32 v37, v37, v19, 0x7fff
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v35, v38, v39 :: v_dual_lshlrev_b32 v38, 16, v21
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v39, 0x400000, v19
+; GFX11-TRUE16-NEXT: v_bfe_u32 v48, v36, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v12, 0x40c00000, v12
; GFX11-TRUE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-TRUE16-NEXT: v_add3_u32 v33, v36, v37, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v34, 0x400000, v37
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v36, 0x40c00000, v39 :: v_dual_lshlrev_b32 v39, 16, v20
-; GFX11-TRUE16-NEXT: v_bfe_u32 v35, v38, 16, 1
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v86, 0x40c00000, v86 :: v_dual_lshlrev_b32 v87, 16, v12
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v33, v33, v34, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v37, v36, 16, 1
-; GFX11-TRUE16-NEXT: v_add3_u32 v34, v35, v38, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v38
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v39, 0x40c00000, v39
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v48
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v48, 16, v21
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v87, 0x40c00000, v87 :: v_dual_lshlrev_b32 v96, 16, v13
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v20, v34, v35, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v34, v37, v36, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v36
-; GFX11-TRUE16-NEXT: v_bfe_u32 v37, v39, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v34.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v33.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v19, v37, v39, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v37, v48, v36, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v39, 0x400000, v36
+; GFX11-TRUE16-NEXT: v_bfe_u32 v48, v20, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v36, 0x400000, v39
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v48, 0x40c00000, v48 :: v_dual_add_f32 v49, 0x40c00000, v21
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v34, v34, v35 :: v_dual_lshlrev_b32 v21, 16, v22
-; GFX11-TRUE16-NEXT: v_add3_u32 v35, v37, v39, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v37, v38, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39
-; GFX11-TRUE16-NEXT: v_bfe_u32 v39, v48, 16, 1
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v50, 0x40c00000, v21
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v97, 0x400000, v87
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v35, v35, v36, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v36, v37, v38, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v37, 0x400000, v38
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
+; GFX11-TRUE16-NEXT: v_bfe_u32 v49, v38, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v35.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v32.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v36, v37, v39, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v37, v48, v20, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v39, 0x400000, v20
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v21, 0x40c00000, v21
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
+; GFX11-TRUE16-NEXT: v_add3_u32 v48, v49, v38, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v49, 0x400000, v38
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v20, v37, v39, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v39, v21, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v38, 0x400000, v48
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v35
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v16, 16, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff0000, v25
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v36, v36, v37, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v37, v39, v48, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v39, v49, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v50
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v36.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add3_u32 v39, v39, v21, 0x7fff
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v37, v48, v49 :: v_dual_lshlrev_b32 v48, 16, v23
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v49, 0x400000, v21
+; GFX11-TRUE16-NEXT: v_bfe_u32 v50, v38, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v48, 0x40c00000, v48 :: v_dual_cndmask_b32 v21, v39, v49
+; GFX11-TRUE16-NEXT: v_add3_u32 v39, v50, v38, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v49, 0x400000, v38
+; GFX11-TRUE16-NEXT: v_bfe_u32 v50, v22, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-TRUE16-NEXT: v_bfe_u32 v51, v48, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v23, 0x40c00000, v23
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v37.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v38, v39, v49, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v39, v50, v22, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v49, 0x400000, v22
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
+; GFX11-TRUE16-NEXT: v_add3_u32 v50, v51, v48, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v51, 0x400000, v48
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v22, v39, v49, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v49, v23, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v48, 0x40c00000, v22
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v64, 16, v28
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff0000, v29
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v67, 16, v0
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v21, v37, v38, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v37, v39, v49, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v38, 0x400000, v49
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v49, v49
-; GFX11-TRUE16-NEXT: v_bfe_u32 v39, v50, 16, 1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v21
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v69, 0xffff0000, v1
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v22, v37, v38 :: v_dual_lshlrev_b32 v71, 16, v4
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v49, 0x40c00000, v51
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff0000, v23
-; GFX11-TRUE16-NEXT: v_add3_u32 v37, v39, v50, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v38, 0x400000, v50
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v50, 0x40c00000, v51 :: v_dual_lshlrev_b32 v51, 16, v24
-; GFX11-TRUE16-NEXT: v_bfe_u32 v39, v48, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v23, v37, v38, vcc_lo
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v38, 0x400000, v48
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v48, 0x40c00000, v52
+; GFX11-TRUE16-NEXT: v_add3_u32 v49, v49, v23, 0x7fff
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v39, v50, v51 :: v_dual_lshlrev_b32 v50, 16, v25
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v51, 0x400000, v23
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v52, v48, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v50, 0x40c00000, v50 :: v_dual_cndmask_b32 v23, v49, v51
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add3_u32 v49, v52, v48, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v51, 0x400000, v48
+; GFX11-TRUE16-NEXT: v_bfe_u32 v52, v24, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v51, 0x40c00000, v51
-; GFX11-TRUE16-NEXT: v_add3_u32 v37, v39, v48, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v39, v49, 16, 1
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v22.h
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v71, 0x40c00000, v71 :: v_dual_add_f32 v4, 0x40c00000, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v37, v37, v38, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v38, v39, v49, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v39, 0x400000, v49
-; GFX11-TRUE16-NEXT: v_bfe_u32 v48, v50, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v49, v49
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v49, 0x40c00000, v52 :: v_dual_lshlrev_b32 v52, 16, v25
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v21
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v24, v38, v39 :: v_dual_and_b32 v5, 0xffff0000, v5
-; GFX11-TRUE16-NEXT: v_add3_u32 v38, v48, v50, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v39, 0x400000, v50
-; GFX11-TRUE16-NEXT: v_bfe_u32 v48, v51, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v53, v50, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v25, 0x40c00000, v25
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v39.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v48, v49, v51, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v49, v52, v24, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v51, 0x400000, v24
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
+; GFX11-TRUE16-NEXT: v_add3_u32 v52, v53, v50, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v53, 0x400000, v50
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v24, v49, v51, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v51, v25, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50
-; GFX11-TRUE16-NEXT: v_bfe_u32 v50, v49, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v50, 0x40c00000, v54
+; GFX11-TRUE16-NEXT: v_add3_u32 v51, v51, v25, 0x7fff
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v49, v52, v53 :: v_dual_lshlrev_b32 v52, 16, v27
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v53, 0x400000, v25
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v54, v50, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
; GFX11-TRUE16-NEXT: v_add_f32_e32 v52, 0x40c00000, v52
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v80, 0x40c00000, v80 :: v_dual_add_f32 v5, 0x40c00000, v5
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v38, v38, v39, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v39, v48, v51, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v48, 0x400000, v51
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v51, v51
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v51, 0x40c00000, v53
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v81, 16, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v7, 0x40c00000, v7
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v25, v39, v48, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v39, v50, v49, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v48, 0x400000, v49
-; GFX11-TRUE16-NEXT: v_bfe_u32 v50, v52, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v49, v49
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v49, 0x400000, v52
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v81, 0x40c00000, v81 :: v_dual_add_f32 v6, 0x40c00000, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v39, v39, v48, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v48, v50, v52, 0x7fff
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v38.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v25, v51, v53, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v51, v54, v50, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v53, 0x400000, v50
+; GFX11-TRUE16-NEXT: v_bfe_u32 v54, v26, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50
+; GFX11-TRUE16-NEXT: v_bfe_u32 v55, v52, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v27, 0x40c00000, v27
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v49.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v50, v51, v53, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v51, v54, v26, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v53, 0x400000, v26
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
+; GFX11-TRUE16-NEXT: v_add3_u32 v54, v55, v52, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v55, 0x400000, v52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v26, v51, v53, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v53, v27, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v52, v52
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v53, 16, v26
-; GFX11-TRUE16-NEXT: v_bfe_u32 v50, v51, 16, 1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v84, 0x40c00000, v84 :: v_dual_add_f32 v9, 0x40c00000, v9
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v48, v48, v49, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v53, 0x40c00000, v53 :: v_dual_add_f32 v54, 0x40c00000, v26
-; GFX11-TRUE16-NEXT: v_add3_u32 v49, v50, v51, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v50, 0x400000, v51
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v26, 16, v27
-; GFX11-TRUE16-NEXT: v_bfe_u32 v52, v53, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v51, v51
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v51, 0x400000, v53
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v48
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v85, 16, v10
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v49, v49, v50, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v50, v52, v53, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v52, v54, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v53, v53
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v55, 0x40c00000, v26
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v53, 0x40c00000, v27
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v85, 0x40c00000, v85 :: v_dual_cndmask_b32 v26, v50, v51
-; GFX11-TRUE16-NEXT: v_add3_u32 v50, v52, v54, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v51, 0x400000, v54
-; GFX11-TRUE16-NEXT: v_bfe_u32 v52, v55, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v54, v54
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v54, 0x40c00000, v64
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff0000, v28
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v10, 0x40c00000, v10 :: v_dual_add_f32 v11, 0x40c00000, v11
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v27, v50, v51, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v50, v52, v55, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v51, 0x400000, v55
-; GFX11-TRUE16-NEXT: v_bfe_u32 v52, v53, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v55, v55
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v55, 0x40c00000, v64
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v27.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v26
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v28, v50, v51 :: v_dual_and_b32 v13, 0xffff0000, v13
-; GFX11-TRUE16-NEXT: v_add3_u32 v50, v52, v53, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v51, 0x400000, v53
-; GFX11-TRUE16-NEXT: v_bfe_u32 v52, v54, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v53, v53
-; GFX11-TRUE16-NEXT: v_bfe_u32 v53, v55, 16, 1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v26
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v37.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v23
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v50, v50, v51, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v51, v52, v54, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v52, 0x400000, v54
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v54, v54
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v64, 16, v29
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v13, 0x40c00000, v13
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v52, 0x40c00000, v64
+; GFX11-TRUE16-NEXT: v_add3_u32 v53, v53, v27, 0x7fff
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v51, v54, v55 :: v_dual_lshlrev_b32 v54, 16, v29
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v55, 0x400000, v27
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v29, v51, v52, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v51, v53, v55, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v52, 0x400000, v55
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v55, v55
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v54, 0x40c00000, v65 :: v_dual_lshlrev_b32 v65, 16, v30
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v64, 0x40c00000, v64
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v50.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v51, v51, v52, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v55, v54, 16, 1
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v65, 0x40c00000, v65
-; GFX11-TRUE16-NEXT: v_bfe_u32 v53, v64, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v64, v64
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v28
-; GFX11-TRUE16-NEXT: v_add3_u32 v52, v53, v64, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v53, 0x400000, v64
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v64, 0x40c00000, v66
+; GFX11-TRUE16-NEXT: v_bfe_u32 v64, v52, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v54, 0x40c00000, v54
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v48.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v27, v53, v55, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v53, v64, v52, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v55, 0x400000, v52
+; GFX11-TRUE16-NEXT: v_bfe_u32 v64, v28, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v52, v52
+; GFX11-TRUE16-NEXT: v_bfe_u32 v65, v54, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v29, 0x40c00000, v29
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v51.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v52, v53, v55, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v53, v64, v28, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v55, 0x400000, v28
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
+; GFX11-TRUE16-NEXT: v_add3_u32 v64, v65, v54, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v65, 0x400000, v54
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v28, v53, v55, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v55, v29, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v54, v54
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v54, 0x40c00000, v66
+; GFX11-TRUE16-NEXT: v_add3_u32 v55, v55, v29, 0x7fff
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v66, 16, v31
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v30, v52, v53 :: v_dual_and_b32 v31, 0xffff0000, v31
-; GFX11-TRUE16-NEXT: v_add3_u32 v52, v55, v54, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v53, 0x400000, v54
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v53, v64, v65 :: v_dual_lshlrev_b32 v64, 16, v31
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v65, 0x400000, v29
+; GFX11-TRUE16-NEXT: v_bfe_u32 v66, v54, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v64, 0x40c00000, v64 :: v_dual_cndmask_b32 v29, v55, v65
+; GFX11-TRUE16-NEXT: v_add3_u32 v55, v66, v54, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v65, 0x400000, v54
+; GFX11-TRUE16-NEXT: v_bfe_u32 v66, v30, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v54, v54
-; GFX11-TRUE16-NEXT: v_bfe_u32 v55, v65, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v54, 0x400000, v65
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v66, 0x40c00000, v66 :: v_dual_add_f32 v31, 0x40c00000, v31
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v52, v52, v53, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_add3_u32 v53, v55, v65, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v55, v64, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v65, v65
-; GFX11-TRUE16-NEXT: v_bfe_u32 v65, v66, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v53, v53, v54, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_add3_u32 v54, v55, v64, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v55, 0x400000, v64
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v64, v64
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v64, 0x400000, v66
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v53
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v54, v54, v55, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v55, v65, v66, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v67, v64, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v31, 0x40c00000, v31
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v53.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v54, v55, v65, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v55, v66, v30, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v65, 0x400000, v30
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
+; GFX11-TRUE16-NEXT: v_add3_u32 v66, v67, v64, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v67, 0x400000, v64
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v30, v55, v65, vcc_lo
; GFX11-TRUE16-NEXT: v_bfe_u32 v65, v31, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v66, v66
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v67, 0x40c00000, v67 :: v_dual_add_f32 v66, 0x40c00000, v68
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v68, 16, v1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v55, v64, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v55, v65, v31, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v64, 0x400000, v31
-; GFX11-TRUE16-NEXT: v_bfe_u32 v65, v67, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v64, v64
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v64, 0x40c00000, v68
+; GFX11-TRUE16-NEXT: v_add3_u32 v65, v65, v31, 0x7fff
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v55, v66, v67 :: v_dual_lshlrev_b32 v66, 16, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v67, 0x400000, v31
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v68, v64, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v31, v55, v64, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v55, v65, v67, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v64, 0x400000, v67
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v67, v67
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v67, 0x40c00000, v69
-; GFX11-TRUE16-NEXT: v_bfe_u32 v65, v66, 16, 1
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v69, 16, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v31.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v55, v64, vcc_lo
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v64, 0x400000, v66
-; GFX11-TRUE16-NEXT: v_add3_u32 v55, v65, v66, 0x7fff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v66, 0x40c00000, v66 :: v_dual_cndmask_b32 v31, v65, v67
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add3_u32 v65, v68, v64, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v67, 0x400000, v64
+; GFX11-TRUE16-NEXT: v_bfe_u32 v68, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v64, v64
+; GFX11-TRUE16-NEXT: v_bfe_u32 v69, v66, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v55.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v64, v65, v67, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v65, v68, v0, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v67, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add3_u32 v68, v69, v66, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v69, 0x400000, v66
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v65, v67, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v65, v1, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v66, v66
-; GFX11-TRUE16-NEXT: v_bfe_u32 v66, v67, 16, 1
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v69, 0x40c00000, v69
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v55, v55, v64, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v67, 0x40c00000, v70
+; GFX11-TRUE16-NEXT: v_add3_u32 v65, v65, v1, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v66, v68, v69, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v68, 16, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v69, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v70, v67, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX11-TRUE16-NEXT: v_add_f32_e32 v68, 0x40c00000, v68
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v55.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v65, v68, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v68, v68
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v1
-; GFX11-TRUE16-NEXT: v_add3_u32 v64, v65, v68, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v65, 0x400000, v68
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v68, 0x40c00000, v70
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v70, 16, v3
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v2, v64, v65 :: v_dual_and_b32 v3, 0xffff0000, v3
-; GFX11-TRUE16-NEXT: v_add3_u32 v64, v66, v67, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v65, 0x400000, v67
-; GFX11-TRUE16-NEXT: v_bfe_u32 v66, v69, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v50.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v65, v69, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v65, v70, v67, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v69, 0x400000, v67
+; GFX11-TRUE16-NEXT: v_bfe_u32 v70, v2, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v67, v67
-; GFX11-TRUE16-NEXT: v_bfe_u32 v67, v68, 16, 1
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v70, 0x40c00000, v70 :: v_dual_add_f32 v3, 0x40c00000, v3
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v64, v64, v65, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v65, v66, v69, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v66, 0x400000, v69
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v69, v69
-; GFX11-TRUE16-NEXT: v_bfe_u32 v69, v70, 16, 1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v65, v65, v66, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v66, v67, v68, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v67, 0x400000, v68
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v68, v68
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v68, 0x400000, v70
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v65
+; GFX11-TRUE16-NEXT: v_bfe_u32 v71, v68, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add3_u32 v67, v70, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v65, v65, v69, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v69, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v70, v71, v68, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v71, 0x400000, v68
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v66, v66, v67, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v67, v69, v70, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v69, v3, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v70, v70
-; GFX11-TRUE16-NEXT: v_bfe_u32 v70, v71, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v67, v67, v68, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_add3_u32 v68, v69, v3, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v69, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v67, v69, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v67, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v68, v68
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v69, 0x40c00000, v80
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v65.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v67, v67, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v68, v70, v71, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v70, 16, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v71, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v80, v69, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v67
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v68, v69, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v68, v70, v71, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v69, 0x400000, v71
-; GFX11-TRUE16-NEXT: v_bfe_u32 v70, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v71, v71
-; GFX11-TRUE16-NEXT: v_bfe_u32 v71, v80, 16, 1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v68, v68, v69, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v69, v70, v4, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v70, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v70, 0x40c00000, v70
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v52.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v67, v71, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v67, v80, v69, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v71, 0x400000, v69
+; GFX11-TRUE16-NEXT: v_bfe_u32 v80, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v69, v69
+; GFX11-TRUE16-NEXT: v_bfe_u32 v81, v70, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v68.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v69, v80, v4, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v67, v67, v71, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v71, 0x400000, v4
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v68
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v69, v70, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v69, v71, v80, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v70, 0x400000, v80
-; GFX11-TRUE16-NEXT: v_bfe_u32 v71, v5, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v80, v80
-; GFX11-TRUE16-NEXT: v_bfe_u32 v80, v81, 16, 1
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v4.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v69, v69, v70, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v70, v71, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v71, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v80, v81, v70, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v81, 0x400000, v70
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v69, v71, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v69, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v70, v70
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v71, 0x40c00000, v82
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v67.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v69, v69, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v70, v80, v81, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v80, 16, v7
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v81, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v82, v71, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v69
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v70, v71, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v70, v80, v81, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v71, 0x400000, v81
-; GFX11-TRUE16-NEXT: v_bfe_u32 v80, v6, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v81, v81
-; GFX11-TRUE16-NEXT: v_bfe_u32 v81, v82, 16, 1
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v5.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v70, v70, v71, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v71, v80, v6, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v80, 0x400000, v6
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v71, v80, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v71, v81, v82, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v80, 0x400000, v82
-; GFX11-TRUE16-NEXT: v_bfe_u32 v81, v7, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v82, v82
-; GFX11-TRUE16-NEXT: v_bfe_u32 v82, v83, 16, 1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v71, v71, v80, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v80, v81, v7, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v81, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v80, 0x40c00000, v80 :: v_dual_cndmask_b32 v5, v69, v81
+; GFX11-TRUE16-NEXT: v_add3_u32 v69, v82, v71, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v81, 0x400000, v71
+; GFX11-TRUE16-NEXT: v_bfe_u32 v82, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v71, v71
+; GFX11-TRUE16-NEXT: v_bfe_u32 v83, v80, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v7, 0x40c00000, v7
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v70.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v71, v82, v6, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v69, v69, v81, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v81, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v82, v83, v80, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v83, 0x400000, v80
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v71, v81, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v71, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v80, v80
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v81, 0x40c00000, v84
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v69.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v71, v71, v7, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v80, v82, v83, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v82, 16, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v83, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v84, v81, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v7, v80, v81, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v80, v82, v83, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v81, 0x400000, v83
-; GFX11-TRUE16-NEXT: v_bfe_u32 v82, v8, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v83, v83
-; GFX11-TRUE16-NEXT: v_bfe_u32 v83, v84, 16, 1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v80, v80, v81, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v81, v82, v8, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v82, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v82, 0x40c00000, v82 :: v_dual_cndmask_b32 v7, v71, v83
+; GFX11-TRUE16-NEXT: v_add3_u32 v71, v84, v81, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v83, 0x400000, v81
+; GFX11-TRUE16-NEXT: v_bfe_u32 v84, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v81, v81
+; GFX11-TRUE16-NEXT: v_bfe_u32 v85, v82, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v9, 0x40c00000, v9
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v80.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v81, v84, v8, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v71, v71, v83, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v83, 0x400000, v8
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v8, v81, v82, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v81, v83, v84, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v82, 0x400000, v84
-; GFX11-TRUE16-NEXT: v_bfe_u32 v83, v9, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v84, v84
-; GFX11-TRUE16-NEXT: v_bfe_u32 v84, v85, 16, 1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v81, v81, v82, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v82, v83, v9, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v83, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_add3_u32 v84, v85, v82, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v85, 0x400000, v82
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v8, v81, v83, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v81, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v82, v82
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v83, 0x40c00000, v86
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v71.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v81, v81, v9, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v82, v84, v85, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v84, 16, v11
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v85, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v86, v83, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v9, v82, v83, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v82, v84, v85, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v83, 0x400000, v85
-; GFX11-TRUE16-NEXT: v_bfe_u32 v84, v10, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v85, v85
-; GFX11-TRUE16-NEXT: v_bfe_u32 v85, v86, 16, 1
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v9.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v81
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v82, v82, v83, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v83, v84, v10, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v84, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v84, 0x40c00000, v84
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v54.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v9, v81, v85, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v81, v86, v83, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v85, 0x400000, v83
+; GFX11-TRUE16-NEXT: v_bfe_u32 v86, v10, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v83, v83
+; GFX11-TRUE16-NEXT: v_bfe_u32 v87, v84, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v11, 0x40c00000, v11
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v82.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v83, v86, v10, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v81, v81, v85, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v85, 0x400000, v10
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v7, 16, v9
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v83, v84, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v83, v85, v86, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v84, 0x400000, v86
-; GFX11-TRUE16-NEXT: v_bfe_u32 v85, v11, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v86, v86
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v86, 0x40c00000, v96
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v96, 0x400000, v11
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v10.h
-; GFX11-TRUE16-NEXT: v_add3_u32 v85, v85, v11, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v83, v83, v84, vcc_lo
-; GFX11-TRUE16-NEXT: v_bfe_u32 v84, v87, 16, 1
+; GFX11-TRUE16-NEXT: v_add3_u32 v86, v87, v84, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v87, 0x400000, v84
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v83, v85, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v83, v11, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v84, v84
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v85, 0x40c00000, v96
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v81.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v83, v83, v11, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v84, v86, v87, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v86, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v87, 16, v13
+; GFX11-TRUE16-NEXT: v_bfe_u32 v96, v85, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-TRUE16-NEXT: v_bfe_u32 v99, v86, 16, 1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v82
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_add3_u32 v84, v84, v87, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v11, v85, v96, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v87, v87
-; GFX11-TRUE16-NEXT: v_add3_u32 v87, v99, v86, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v96, 0x400000, v86
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v6, 16, v10
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v11.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v84, v84, v97, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v86, v86
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v11, 16, v83
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v86, v87, v96, vcc_lo
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v96, 16, v15
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v64.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v11, v83, v86, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v83, 0x40c00000, v87
+; GFX11-TRUE16-NEXT: v_add3_u32 v86, v96, v85, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v87, 0x400000, v85
+; GFX11-TRUE16-NEXT: v_bfe_u32 v96, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v85, v85
+; GFX11-TRUE16-NEXT: v_bfe_u32 v97, v83, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v13, 0x40c00000, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v98, 0x400000, v83
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v84.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v85, v86, v87, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v86, v96, v12, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v87, 0x400000, v12
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: v_add3_u32 v96, v97, v83, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v97, 16, v14
+; GFX11-TRUE16-NEXT: v_bfe_u32 v99, v13, 16, 1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v12, v86, v87, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v83, v83
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v86, 0x40c00000, v97
+; GFX11-TRUE16-NEXT: v_add3_u32 v87, v99, v13, 0x7fff
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v85.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v83, v96, v98, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v96, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v98, 16, v15
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: v_bfe_u32 v97, v86, 16, 1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v5, 16, v11
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v6.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v70
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v96, 0x40c00000, v96 :: v_dual_add_f32 v15, 0x40c00000, v15
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v5, 16, v6
-; GFX11-TRUE16-NEXT: v_bfe_u32 v101, v96, 16, 1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v102, v15, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v113, 0x400000, v15
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v114, 0x400000, v96
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v17, 16, v69
-; GFX11-TRUE16-NEXT: v_add3_u32 v101, v101, v96, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v102, v102, v15, 0x7fff
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v66.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v27, 16, v55
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v51.h
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v12, 0x40c00000, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v29
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v31, 16, v66
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v98, v12, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v97, 0x400000, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v27, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v38.h
-; GFX11-TRUE16-NEXT: v_add3_u32 v85, v98, v12, 0x7fff
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v98, 16, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v24
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v100, 0x400000, v14
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v66.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v13, v87, v96, vcc_lo
; GFX11-TRUE16-NEXT: v_add_f32_e32 v87, 0x40c00000, v98
-; GFX11-TRUE16-NEXT: v_bfe_u32 v98, v13, 16, 1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v34.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v20
+; GFX11-TRUE16-NEXT: v_add3_u32 v96, v97, v86, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v97, 0x400000, v86
+; GFX11-TRUE16-NEXT: v_bfe_u32 v98, v14, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v86, v86
; GFX11-TRUE16-NEXT: v_bfe_u32 v99, v87, 16, 1
-; GFX11-TRUE16-NEXT: v_add3_u32 v98, v98, v13, 0x7fff
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v103, 0x400000, v87
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v18
-; GFX11-TRUE16-NEXT: v_add3_u32 v99, v99, v87, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; GFX11-TRUE16-NEXT: v_bfe_u32 v100, v14, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v112, 0x400000, v14
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v15, 0x40c00000, v15
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v83.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v86, v96, v97, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v97, v98, v14, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v98, v99, v87, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v99, 0x400000, v87
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v87, v87
+; GFX11-TRUE16-NEXT: v_bfe_u32 v96, v15, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v101, 0x400000, v15
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v87, v98, v99, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add3_u32 v100, v100, v14, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v14, v100, v112, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v96, v96, v15, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v14, v97, v100, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v14.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v15, v102, v113, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v96, v96
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v15.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v96, v101, v114, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v87, v87
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v96
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v87, v99, v103, vcc_lo
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v99, 0x400000, v13
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v3, 16, v15
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v87
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v13, v98, v99, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v4, 16, v14
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v13.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v12, v85, v97, vcc_lo
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v13, 16, v86
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v12.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v12, 16, v84
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v3, 16, v13
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v8.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v80
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v4, 16, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v7.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v3, 16, v8
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v4, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v22, 16, v68
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v64.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v17, 16, v65
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v54.h
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v3, 16, v67
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v22, 16, v64
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v52.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v30
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v30, v17, 16, v53
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v49.h
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v22, 16, v52
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v25
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v17, 16, v48
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v36.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v22, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v33.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v17.h
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v17, 16, v35
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v36
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v37, 16, v38
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v86.h
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v39, 16, v16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v15, v96, v101, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v87.h
; GFX11-TRUE16-NEXT: .LBB104_2: ; %end
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
@@ -230606,641 +236493,1242 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v64bf16_to_v64i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v30, v12
-; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
-; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v26, v8
-; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v24, v6
-; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
-; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
-; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s15, s3
-; GFX11-NEXT: s_mov_b32 s14, s2
-; GFX11-NEXT: s_mov_b32 s13, s1
-; GFX11-NEXT: s_mov_b32 s12, s0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB105_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB105_4
-; GFX11-NEXT: .LBB105_2: ; %cmp.true
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v17
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v16
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff0000, v18
-; GFX11-NEXT: s_and_b32 s0, s12, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s24, 16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_add_f32 v1, 0x40c00000, v1
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v17
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v6, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v2
-; GFX11-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v6, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
-; GFX11-NEXT: v_bfe_u32 v5, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v0
-; GFX11-NEXT: v_add_f32_e32 v0, 0x40c00000, v4
-; GFX11-NEXT: v_bfe_u32 v11, v3, 16, 1
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v16, v5, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v11, v3
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v17, v6, v9, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v19
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v7, v10, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_dual_add_f32 v3, 0x40c00000, v6 :: v_dual_add_nc_u32 v2, v2, v0
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v18, v1, v5 :: v_dual_lshlrev_b32 v5, 16, v19
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_add_f32 v5, 0x40c00000, v5 :: v_dual_add_f32 v4, 0x40c00000, v4
-; GFX11-NEXT: v_dual_cndmask_b32 v33, v2, v6 :: v_dual_add_nc_u32 v2, v7, v3
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v20
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v1, v4, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_bfe_u32 v6, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v1
-; GFX11-NEXT: v_or_b32_e32 v1, 0x400000, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v19, v0, v1 :: v_dual_add_nc_u32 v0, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v1, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_f32_e32 v2, 0x40c00000, v7
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v0, v1, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v21
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v20
-; GFX11-NEXT: v_bfe_u32 v0, v2, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_add_f32 v5, 0x40c00000, v5 :: v_dual_add_f32 v4, 0x40c00000, v6
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v0, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v1, v3, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_bfe_u32 v1, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v3, 0x40c00000, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v4
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v35, v0, v6, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v6, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v1
-; GFX11-NEXT: v_or_b32_e32 v1, 0x400000, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v7, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v6, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v22
-; GFX11-NEXT: v_cndmask_b32_e32 v36, v0, v1, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v1, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v36
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v21, v0, v1, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v22
-; GFX11-NEXT: v_add_f32_e32 v4, 0x40c00000, v6
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v23
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v23
-; GFX11-NEXT: v_cndmask_b32_e32 v22, v1, v3, vcc_lo
-; GFX11-NEXT: v_add_f32_e32 v2, 0x40c00000, v7
-; GFX11-NEXT: v_bfe_u32 v1, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v3, 0x40c00000, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v0, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v0, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v7, v3
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v37, v0, v6 :: v_dual_add_nc_u32 v0, 0x7fff, v1
-; GFX11-NEXT: v_or_b32_e32 v1, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_dual_cndmask_b32 v23, v0, v1 :: v_dual_add_nc_u32 v0, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v1, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v38, v0, v1, vcc_lo
-; GFX11-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v6, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v6, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v24
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v25
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
-; GFX11-NEXT: v_add_f32_e32 v4, 0x40c00000, v6
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v25
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_dual_add_f32 v5, 0x40c00000, v5 :: v_dual_cndmask_b32 v24, v1, v3
-; GFX11-NEXT: v_dual_add_f32 v2, 0x40c00000, v7 :: v_dual_add_f32 v3, 0x40c00000, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v1, v4, 16, 1
-; GFX11-NEXT: v_bfe_u32 v0, v2, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v0, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v7, v3
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v26
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_dual_cndmask_b32 v39, v0, v6 :: v_dual_add_nc_u32 v0, 0x7fff, v1
-; GFX11-NEXT: v_or_b32_e32 v1, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v6, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v25, v0, v1 :: v_dual_add_nc_u32 v0, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v1, 0x400000, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v26
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v48, v0, v1, vcc_lo
-; GFX11-NEXT: v_add_f32_e32 v2, 0x40c00000, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
-; GFX11-NEXT: v_add_f32_e32 v4, 0x40c00000, v6
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v27
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v0, v2, 16, 1
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v27
-; GFX11-NEXT: v_cndmask_b32_e32 v49, v1, v3, vcc_lo
-; GFX11-NEXT: v_add_f32_e32 v3, 0x40c00000, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v0, v2
-; GFX11-NEXT: v_bfe_u32 v1, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX11-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v49
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v7, v3
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v28
-; GFX11-NEXT: v_dual_cndmask_b32 v26, v0, v6 :: v_dual_add_nc_u32 v1, v1, v4
-; GFX11-NEXT: v_bfe_u32 v6, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v1
-; GFX11-NEXT: v_or_b32_e32 v1, 0x400000, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v6, v5
-; GFX11-NEXT: v_dual_cndmask_b32 v27, v0, v1 :: v_dual_lshlrev_b32 v6, 16, v28
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v1, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v50, v0, v1, vcc_lo
-; GFX11-NEXT: v_add_f32_e32 v2, 0x40c00000, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
-; GFX11-NEXT: v_add_f32_e32 v4, 0x40c00000, v6
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v29
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v0, v2, 16, 1
-; GFX11-NEXT: v_dual_cndmask_b32 v28, v1, v3 :: v_dual_lshlrev_b32 v5, 16, v29
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_f32_e32 v3, 0x40c00000, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v0, v2
-; GFX11-NEXT: v_bfe_u32 v1, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX11-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v7, v3
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v30
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v51, v0, v6, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v6, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v1
-; GFX11-NEXT: v_or_b32_e32 v1, 0x400000, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_dual_cndmask_b32 v29, v0, v1 :: v_dual_lshlrev_b32 v6, 16, v30
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v1, 0x400000, v3
-; GFX11-NEXT: v_add_f32_e32 v2, 0x40c00000, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v52, v0, v1 :: v_dual_add_nc_u32 v1, 0x7fff, v4
-; GFX11-NEXT: v_bfe_u32 v0, v2, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v4, 0x40c00000, v6
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v31
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v0, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_dual_cndmask_b32 v30, v1, v3 :: v_dual_add_f32 v3, 0x40c00000, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v31
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_dual_cndmask_b32 v53, v0, v5 :: v_dual_add_f32 v0, 0x40c00000, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v3
-; GFX11-NEXT: v_bfe_u32 v1, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v6, v0, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v4
-; GFX11-NEXT: s_lshl_b32 s0, s12, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v6, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_dual_cndmask_b32 v54, v1, v2 :: v_dual_add_nc_u32 v1, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v3
-; GFX11-NEXT: v_bfe_u32 v5, v7, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v54
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v1, v2, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v5, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_and_b32 s0, s13, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v5, v1, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s13, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v5, v1
-; GFX11-NEXT: v_bfe_u32 v5, v6, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v55, v2, v3, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s14, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v1
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v6
-; GFX11-NEXT: s_lshl_b32 s0, s14, 16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v4 :: v_dual_add_nc_u32 v1, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v3
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: s_and_b32 s0, s15, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v9, v4, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v8
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: s_lshl_b32 s0, s15, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v9, v4
-; GFX11-NEXT: v_bfe_u32 v9, v10, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v64, v5, v6, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s16, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v9, v10
-; GFX11-NEXT: v_bfe_u32 v9, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_lshl_b32 s0, s16, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v8, v11, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v65, v6, v7, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v9, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v65
-; GFX11-NEXT: v_cndmask_b32_e32 v66, v4, v6, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_and_b32 s0, s17, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v9, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v67, v6, v7, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v11
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: s_lshl_b32 s0, s17, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v9, v4
-; GFX11-NEXT: v_bfe_u32 v8, v10, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v67
-; GFX11-NEXT: v_cndmask_b32_e32 v68, v5, v6, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s18, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v10
-; GFX11-NEXT: v_bfe_u32 v9, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_lshl_b32 s0, s18, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v9, v5
-; GFX11-NEXT: v_bfe_u32 v9, v11, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v9, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v69, v6, v7, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_and_b32 s0, s19, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v10, v6, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s19, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v9
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v11
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v10, v6
-; GFX11-NEXT: v_bfe_u32 v10, v12, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v70, v7, v8, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s20, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v9
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v12
-; GFX11-NEXT: v_bfe_u32 v11, v7, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: s_lshl_b32 s0, s20, 16
-; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v7
-; GFX11-NEXT: v_and_or_b32 v5, 0xffff0000, v69, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v11, v7
-; GFX11-NEXT: v_bfe_u32 v10, v13, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: s_and_b32 s0, s21, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v13
-; GFX11-NEXT: v_bfe_u32 v71, v11, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v80, v8, v14, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v10
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v13
-; GFX11-NEXT: s_lshl_b32 s0, s21, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v9, v15, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v71, v11
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v11
-; GFX11-NEXT: v_bfe_u32 v71, v12, 16, 1
-; GFX11-NEXT: v_dual_cndmask_b32 v8, v8, v10 :: v_dual_add_nc_u32 v9, 0x7fff, v9
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: s_and_b32 s0, s22, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v10, v14, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s22, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v81, v9, v15, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v71, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v14
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v12
-; GFX11-NEXT: v_bfe_u32 v71, v11, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_or_b32_e32 v82, 0x400000, v14
-; GFX11-NEXT: v_bfe_u32 v83, v13, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v71, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v9, v15, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-NEXT: s_and_b32 s0, s23, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v83, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
-; GFX11-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v10, v10, v82, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v71, 0x400000, v11
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: s_lshl_b32 s0, s23, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
-; GFX11-NEXT: v_or_b32_e32 v82, 0x400000, v13
-; GFX11-NEXT: v_bfe_u32 v83, v15, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v84, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v71, v12, v71, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: s_and_b32 s0, s24, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v83, v15
-; GFX11-NEXT: v_bfe_u32 v13, v84, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v83, 0x400000, v15
-; GFX11-NEXT: v_cndmask_b32_e32 v11, v14, v82, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
-; GFX11-NEXT: v_add_f32_e64 v82, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v13, v13, v84
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
-; GFX11-NEXT: v_bfe_u32 v85, v14, 16, 1
-; GFX11-NEXT: v_bfe_u32 v86, v82, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s25, 0xffff0000
-; GFX11-NEXT: v_or_b32_e32 v96, 0x400000, v82
-; GFX11-NEXT: v_dual_cndmask_b32 v83, v12, v83 :: v_dual_add_nc_u32 v12, 0x7fff, v13
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v84
-; GFX11-NEXT: v_add_nc_u32_e32 v15, v85, v14
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v84, v84
-; GFX11-NEXT: v_add_nc_u32_e32 v85, v86, v82
-; GFX11-NEXT: v_or_b32_e32 v84, 0x400000, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v6
-; GFX11-NEXT: v_dual_cndmask_b32 v12, v12, v13 :: v_dual_add_nc_u32 v15, 0x7fff, v15
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v85, 0x7fff, v85
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s25, 16
-; GFX11-NEXT: v_and_or_b32 v6, 0xffff0000, v70, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v84, v15, v84, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v82, v82
-; GFX11-NEXT: v_add_f32_e64 v87, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s26, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v86, v13, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v82, v85, v96, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v85, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s26, 16
-; GFX11-NEXT: v_bfe_u32 v15, v87, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v96, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s27, 16
-; GFX11-NEXT: v_bfe_u32 v97, v85, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v98, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s27, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v99, v96, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v100, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v113, 0x400000, v96
-; GFX11-NEXT: v_bfe_u32 v101, v98, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v96, v96
-; GFX11-NEXT: v_add_nc_u32_e32 v99, v99, v96
-; GFX11-NEXT: v_add_nc_u32_e32 v97, v97, v85
-; GFX11-NEXT: v_bfe_u32 v103, v100, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v101, v101, v98
-; GFX11-NEXT: v_or_b32_e32 v114, 0x400000, v98
-; GFX11-NEXT: v_add_nc_u32_e32 v99, 0x7fff, v99
-; GFX11-NEXT: v_add_nc_u32_e32 v97, 0x7fff, v97
-; GFX11-NEXT: v_or_b32_e32 v112, 0x400000, v85
-; GFX11-NEXT: v_add_nc_u32_e32 v101, 0x7fff, v101
-; GFX11-NEXT: v_add_nc_u32_e32 v103, v103, v100
-; GFX11-NEXT: v_cndmask_b32_e32 v96, v99, v113, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v98, v98
-; GFX11-NEXT: v_add_nc_u32_e32 v15, v15, v87
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v86, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v99, 0x7fff, v103
-; GFX11-NEXT: v_or_b32_e32 v103, 0x400000, v100
-; GFX11-NEXT: v_cndmask_b32_e32 v98, v101, v114, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v85, v85
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
-; GFX11-NEXT: v_or_b32_e32 v102, 0x400000, v87
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
-; GFX11-NEXT: v_or_b32_e32 v86, 0x400000, v13
-; GFX11-NEXT: v_cndmask_b32_e32 v85, v97, v112, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v100, v100
-; GFX11-NEXT: v_lshrrev_b32_e32 v96, 16, v96
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v64, v65
-; GFX11-NEXT: v_and_or_b32 v0, 0xffff0000, v55, v69
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v30
-; GFX11-NEXT: v_cndmask_b32_e32 v97, v99, v103, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v87, v87
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v28
-; GFX11-NEXT: v_and_or_b32 v4, 0xffff0000, v68, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v87, v15, v102, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v66, v67
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v27
-; GFX11-NEXT: v_and_or_b32 v29, 0xffff0000, v52, v55
-; GFX11-NEXT: v_and_or_b32 v28, 0xffff0000, v51, v64
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v14, v86, vcc_lo
-; GFX11-NEXT: v_and_or_b32 v14, 0xffff0000, v85, v96
-; GFX11-NEXT: v_lshrrev_b32_e32 v85, 16, v87
-; GFX11-NEXT: v_lshrrev_b32_e32 v87, 16, v11
-; GFX11-NEXT: v_and_or_b32 v27, 0xffff0000, v50, v65
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v98, 16, v98
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v82
-; GFX11-NEXT: v_lshrrev_b32_e32 v86, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v96, 16, v10
-; GFX11-NEXT: v_and_or_b32 v10, 0xffff0000, v71, v87
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v81
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v7
-; GFX11-NEXT: v_and_or_b32 v30, 0xffff0000, v53, v54
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v22
-; GFX11-NEXT: v_and_or_b32 v25, 0xffff0000, v48, v49
-; GFX11-NEXT: v_and_or_b32 v24, 0xffff0000, v39, v50
-; GFX11-NEXT: v_and_or_b32 v23, 0xffff0000, v38, v51
-; GFX11-NEXT: v_and_or_b32 v22, 0xffff0000, v37, v52
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v17
-; GFX11-NEXT: v_and_or_b32 v15, 0xffff0000, v97, v98
-; GFX11-NEXT: v_and_or_b32 v13, 0xffff0000, v13, v85
-; GFX11-NEXT: v_and_or_b32 v12, 0xffff0000, v84, v82
-; GFX11-NEXT: v_and_or_b32 v11, 0xffff0000, v83, v86
-; GFX11-NEXT: v_and_or_b32 v9, 0xffff0000, v9, v96
-; GFX11-NEXT: v_and_or_b32 v8, 0xffff0000, v8, v71
-; GFX11-NEXT: v_and_or_b32 v7, 0xffff0000, v80, v7
-; GFX11-NEXT: v_and_or_b32 v1, 0xffff0000, v1, v68
-; GFX11-NEXT: v_and_or_b32 v31, 0xffff0000, v31, v70
-; GFX11-NEXT: v_and_or_b32 v26, 0xffff0000, v26, v66
-; GFX11-NEXT: v_and_or_b32 v21, 0xffff0000, v21, v53
-; GFX11-NEXT: v_and_or_b32 v20, 0xffff0000, v35, v36
-; GFX11-NEXT: v_and_or_b32 v19, 0xffff0000, v34, v37
-; GFX11-NEXT: v_and_or_b32 v18, 0xffff0000, v33, v38
-; GFX11-NEXT: v_and_or_b32 v17, 0xffff0000, v32, v39
-; GFX11-NEXT: v_and_or_b32 v16, 0xffff0000, v16, v48
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB105_3:
-; GFX11-NEXT: s_branch .LBB105_2
-; GFX11-NEXT: .LBB105_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
-; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v64bf16_to_v64i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v30, v12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v26, v8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v24, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: s_mov_b32 s15, s3
+; GFX11-TRUE16-NEXT: s_mov_b32 s14, s2
+; GFX11-TRUE16-NEXT: s_mov_b32 s13, s1
+; GFX11-TRUE16-NEXT: s_mov_b32 s12, s0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB105_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB105_4
+; GFX11-TRUE16-NEXT: .LBB105_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v17
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v16
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v18
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s12, 0xffff0000
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s25, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_lshlrev_b32 v3, 16, v17
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 0x40c00000, v1 :: v_dual_cndmask_b32 v16, v5, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v11, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v1, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v32, v6, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v18
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v17, v7, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v33, v0, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v19
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v6
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v18, v5, v7 :: v_dual_and_b32 v7, 0xffff0000, v20
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v20
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v33.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v34, v0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 0x40c00000, v7 :: v_dual_add_nc_u32 v0, v5, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v34.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v19, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v21
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 0x40c00000, v3 :: v_dual_lshlrev_b32 v6, 16, v21
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v35, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v35.h
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v20, v4, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v22
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v32.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v36, v0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v23
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v36.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v21, v4, v8, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 0x40c00000, v3 :: v_dual_add_f32 v6, 0x40c00000, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v5, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v23
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v6, 0x40c00000, v6 :: v_dual_cndmask_b32 v37, v0, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v37.h
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v22
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v22, v4, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v24
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v24
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v38, v0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 0x40c00000, v7 :: v_dual_add_nc_u32 v0, v5, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v23, v4, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v25
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v38.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v39, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v39.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v24, v4, v7 :: v_dual_and_b32 v7, 0xffff0000, v26
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v26
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v48, v0, v1 :: v_dual_add_f32 v1, 0x40c00000, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v48.h
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v25
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v27
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v6, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v25, v4, v8 :: v_dual_add_nc_u32 v0, v5, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v27
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v6, 0x40c00000, v6 :: v_dual_cndmask_b32 v49, v0, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v49.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v26, v4, v7 :: v_dual_and_b32 v7, 0xffff0000, v28
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v28
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v50, v0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 0x40c00000, v7 :: v_dual_add_nc_u32 v0, v5, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v29
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v27, v4, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v50.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v51, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v51.h
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v28, v4, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v30
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v7, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v52, v0, v4, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v52.h
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v29
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v30
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v29, v5, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v31
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s12, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v53, v2, v6 :: v_dual_lshlrev_b32 v2, 16, v31
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v53.h
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v30, v4, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v54, v0, v4 :: v_dual_add_nc_u32 v1, v1, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v31, v6, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s13, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v64, v0, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v0, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s14, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v3 :: v_dual_add_nc_u32 v1, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v7
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v55, v1, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v64.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v2, v8 :: v_dual_add_nc_u32 v2, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s15, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v9
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v65, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v2, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v6
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v2, v3, v7 :: v_dual_add_nc_u32 v3, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v10, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v10
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s16, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v66, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v11, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v54.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v66.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v3, v4, v9 :: v_dual_add_nc_u32 v4, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v11
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v67, v4, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, v4, v7
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v8
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v4, v5, v9 :: v_dual_add_nc_u32 v5, 0x7fff, v10
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s18, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v9, v12
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v13, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v68, v5, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v12
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v67.h
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v13
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v68.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v5, v6, v11 :: v_dual_add_nc_u32 v6, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v69, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v10, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, v6, v9
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v10
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v6, v7, v11 :: v_dual_add_nc_u32 v7, 0x7fff, v12
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v14, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v11, v14
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s20, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v70, v7, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v15, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v14
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v69.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v70.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v7, v8, v13 :: v_dual_add_nc_u32 v8, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v9, v15
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s21, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v71, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v11, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v15
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v80, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, v8, v11
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, v10, v12
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v12
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v81, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v8, v9, v13 :: v_dual_add_nc_u32 v9, 0x7fff, v14
+; GFX11-TRUE16-NEXT: v_bfe_u32 v13, v80, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s22, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, v13, v80
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v81, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v82, v9, v14, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v80
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v71.h
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v11, v81
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v82.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v9, v10, v15 :: v_dual_add_nc_u32 v10, 0x7fff, v13
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v80, v80
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s23, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v80, v10, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v13, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v81
+; GFX11-TRUE16-NEXT: v_bfe_u32 v15, v14, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v84, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v81, v81
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v83, v10, v13
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v85, 0x400000, v14
+; GFX11-TRUE16-NEXT: v_bfe_u32 v81, v84, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v11, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v15, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v83
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v83, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v81, v81, v84
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s24, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v86, v12, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-TRUE16-NEXT: v_bfe_u32 v12, v83, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v81
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, 0x400000, v84
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v11, v11, v85, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v84, v84
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, v12, v83
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v81, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s25, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v85, 0x400000, v83
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v84, v13, v14, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v13, v15, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v81, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v87, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v83, v83
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, v13, v15
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v96, 0x400000, v81
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, v14, v81
+; GFX11-TRUE16-NEXT: v_bfe_u32 v83, v87, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v12, v12, v85, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v85, 0x400000, v15
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v83, v83, v87
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v84.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v85, v13, v85, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v81, v81
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s26, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v86.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v85.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v13, v14, v96 :: v_dual_add_nc_u32 v14, 0x7fff, v83
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v83, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s27, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v96, 0x400000, v87
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v97, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v87, v87
+; GFX11-TRUE16-NEXT: v_bfe_u32 v98, v83, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s27, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v81, v15, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v100, v97, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v87, v14, v96, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, v98, v83
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v99, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v101, 0x400000, v83
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v98, v100, v97
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v83, v83
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v81, v81, v15
+; GFX11-TRUE16-NEXT: v_bfe_u32 v96, v99, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v98, 0x7fff, v98
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v102, 0x400000, v97
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v83, v14, v101, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v97, v97
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v81, 0x7fff, v81
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v100, 0x400000, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v96, v96, v99
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v101, 0x400000, v99
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v97, v98, v102, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v87.h
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v96, 0x7fff, v96
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v80.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v65.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v14, v81, v100, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v99, v99
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v83.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v55.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v15, v96, v101, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v97.h
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB105_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB105_2
+; GFX11-TRUE16-NEXT: .LBB105_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v64bf16_to_v64i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v30, v12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v26, v8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v24, v6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s15, s3
+; GFX11-FAKE16-NEXT: s_mov_b32 s14, s2
+; GFX11-FAKE16-NEXT: s_mov_b32 s13, s1
+; GFX11-FAKE16-NEXT: s_mov_b32 s12, s0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB105_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB105_4
+; GFX11-FAKE16-NEXT: .LBB105_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v17
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v18
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s12, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s24, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_add_f32 v1, 0x40c00000, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v17
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v6, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v16, v5, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v11, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v17, v6, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v19
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v7, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 0x40c00000, v6 :: v_dual_add_nc_u32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v18, v1, v5 :: v_dual_lshlrev_b32 v5, 16, v19
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 0x40c00000, v5 :: v_dual_add_f32 v4, 0x40c00000, v4
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v33, v2, v6 :: v_dual_add_nc_u32 v2, v7, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v20
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v19, v0, v1 :: v_dual_add_nc_u32 v0, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v21
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v20
+; GFX11-FAKE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 0x40c00000, v5 :: v_dual_add_f32 v4, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v1, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v35, v0, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v7, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v6, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v22
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v36, v0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v36
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v21, v0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v22
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v23
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v23
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v22, v1, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v7, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v37, v0, v6 :: v_dual_add_nc_u32 v0, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v23, v0, v1 :: v_dual_add_nc_u32 v0, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v38, v0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v6, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v24
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v25
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v25
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 0x40c00000, v5 :: v_dual_cndmask_b32 v24, v1, v3
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v7 :: v_dual_add_f32 v3, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v7, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v26
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v39, v0, v6 :: v_dual_add_nc_u32 v0, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v25, v0, v1 :: v_dual_add_nc_u32 v0, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v26
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v48, v0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v27
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v27
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v49, v1, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v49
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v7, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v28
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v26, v0, v6 :: v_dual_add_nc_u32 v1, v1, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v6, v5
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v27, v0, v1 :: v_dual_lshlrev_b32 v6, 16, v28
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v50, v0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v29
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v28, v1, v3 :: v_dual_lshlrev_b32 v5, 16, v29
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v7, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v30
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v51, v0, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v29, v0, v1 :: v_dual_lshlrev_b32 v6, 16, v30
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v52, v0, v1 :: v_dual_add_nc_u32 v1, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v31
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v30, v1, v3 :: v_dual_add_f32 v3, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v31
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v53, v0, v5 :: v_dual_add_f32 v0, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s12, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v6, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v54, v1, v2 :: v_dual_add_nc_u32 v1, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v54
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v1, v2, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v5, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s13, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v5, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v6, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v55, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s14, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v6
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v2, v4 :: v_dual_add_nc_u32 v1, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s15, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v9, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v64, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v9, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s16, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v65, v6, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v9, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v65
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v66, v4, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v67, v6, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v9, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v67
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v68, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s18, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v9, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v9, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v69, v6, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v10, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v70, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v12
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s20, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, 0xffff0000, v69, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v11, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v13
+; GFX11-FAKE16-NEXT: v_bfe_u32 v71, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v80, v8, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v13
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s21, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v9, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v71, v11
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_bfe_u32 v71, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v8, v8, v10 :: v_dual_add_nc_u32 v9, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v14, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s22, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v81, v9, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v71, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v14
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_bfe_u32 v71, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v82, 0x400000, v14
+; GFX11-FAKE16-NEXT: v_bfe_u32 v83, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v71, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v9, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v83, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v82, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v71, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s23, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v82, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_bfe_u32 v83, v15, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v84, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v71, v12, v71, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v83, v15
+; GFX11-FAKE16-NEXT: v_bfe_u32 v13, v84, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v83, 0x400000, v15
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v14, v82, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v82, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, v13, v84
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-FAKE16-NEXT: v_bfe_u32 v85, v14, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v86, v82, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s25, 0xffff0000
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v96, 0x400000, v82
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v83, v12, v83 :: v_dual_add_nc_u32 v12, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v84
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, v85, v14
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v84, v84
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v85, v86, v82
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v84, 0x400000, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v12, v12, v13 :: v_dual_add_nc_u32 v15, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v85, 0x7fff, v85
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s25, 16
+; GFX11-FAKE16-NEXT: v_and_or_b32 v6, 0xffff0000, v70, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v84, v15, v84, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v82, v82
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v87, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v86, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v82, v85, v96, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v85, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s26, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v15, v87, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v96, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s27, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v97, v85, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v98, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s27, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v99, v96, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v100, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v113, 0x400000, v96
+; GFX11-FAKE16-NEXT: v_bfe_u32 v101, v98, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v96, v96
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v99, v99, v96
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v97, v97, v85
+; GFX11-FAKE16-NEXT: v_bfe_u32 v103, v100, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v101, v101, v98
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v114, 0x400000, v98
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v99, 0x7fff, v99
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v97, 0x7fff, v97
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v112, 0x400000, v85
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v101, 0x7fff, v101
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v103, v103, v100
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v96, v99, v113, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v98, v98
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, v15, v87
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v86, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v99, 0x7fff, v103
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v103, 0x400000, v100
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v98, v101, v114, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v85, v85
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v102, 0x400000, v87
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v86, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v85, v97, v112, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v100, v100
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v96, 16, v96
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v64, v65
+; GFX11-FAKE16-NEXT: v_and_or_b32 v0, 0xffff0000, v55, v69
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v30
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v97, v99, v103, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v87, v87
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v28
+; GFX11-FAKE16-NEXT: v_and_or_b32 v4, 0xffff0000, v68, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v87, v15, v102, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v66, v67
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v27
+; GFX11-FAKE16-NEXT: v_and_or_b32 v29, 0xffff0000, v52, v55
+; GFX11-FAKE16-NEXT: v_and_or_b32 v28, 0xffff0000, v51, v64
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v14, v86, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_or_b32 v14, 0xffff0000, v85, v96
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v85, 16, v87
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v87, 16, v11
+; GFX11-FAKE16-NEXT: v_and_or_b32 v27, 0xffff0000, v50, v65
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v98, 16, v98
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v82
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v86, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v96, 16, v10
+; GFX11-FAKE16-NEXT: v_and_or_b32 v10, 0xffff0000, v71, v87
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v81
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX11-FAKE16-NEXT: v_and_or_b32 v30, 0xffff0000, v53, v54
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v22
+; GFX11-FAKE16-NEXT: v_and_or_b32 v25, 0xffff0000, v48, v49
+; GFX11-FAKE16-NEXT: v_and_or_b32 v24, 0xffff0000, v39, v50
+; GFX11-FAKE16-NEXT: v_and_or_b32 v23, 0xffff0000, v38, v51
+; GFX11-FAKE16-NEXT: v_and_or_b32 v22, 0xffff0000, v37, v52
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v17
+; GFX11-FAKE16-NEXT: v_and_or_b32 v15, 0xffff0000, v97, v98
+; GFX11-FAKE16-NEXT: v_and_or_b32 v13, 0xffff0000, v13, v85
+; GFX11-FAKE16-NEXT: v_and_or_b32 v12, 0xffff0000, v84, v82
+; GFX11-FAKE16-NEXT: v_and_or_b32 v11, 0xffff0000, v83, v86
+; GFX11-FAKE16-NEXT: v_and_or_b32 v9, 0xffff0000, v9, v96
+; GFX11-FAKE16-NEXT: v_and_or_b32 v8, 0xffff0000, v8, v71
+; GFX11-FAKE16-NEXT: v_and_or_b32 v7, 0xffff0000, v80, v7
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v1, v68
+; GFX11-FAKE16-NEXT: v_and_or_b32 v31, 0xffff0000, v31, v70
+; GFX11-FAKE16-NEXT: v_and_or_b32 v26, 0xffff0000, v26, v66
+; GFX11-FAKE16-NEXT: v_and_or_b32 v21, 0xffff0000, v21, v53
+; GFX11-FAKE16-NEXT: v_and_or_b32 v20, 0xffff0000, v35, v36
+; GFX11-FAKE16-NEXT: v_and_or_b32 v19, 0xffff0000, v34, v37
+; GFX11-FAKE16-NEXT: v_and_or_b32 v18, 0xffff0000, v33, v38
+; GFX11-FAKE16-NEXT: v_and_or_b32 v17, 0xffff0000, v32, v39
+; GFX11-FAKE16-NEXT: v_and_or_b32 v16, 0xffff0000, v16, v48
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB105_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB105_2
+; GFX11-FAKE16-NEXT: .LBB105_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll
index 582f31b..c6211aa 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll
@@ -3090,108 +3090,206 @@ define inreg <4 x i32> @bitcast_v8bf16_to_v4i32_scalar(<8 x bfloat> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v8bf16_to_v4i32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s16, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
-; GFX11-NEXT: .LBB23_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s4, s3, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s3
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
-; GFX11-NEXT: s_pack_lh_b32_b16 s4, 0, s2
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v3
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v3, v7, v9 :: v_dual_add_nc_u32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v7, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v5
-; GFX11-NEXT: v_bfe_u32 v5, v8, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v4
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v11, v9, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v11, v9
-; GFX11-NEXT: v_bfe_u32 v10, v3, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v5, v5, v13 :: v_dual_add_nc_u32 v10, v10, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v10, v14, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v2, v6, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v7, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v3, v1, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v1, v4, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_lshl_or_b32 v0, v7, 16, v8
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: s_branch .LBB23_2
-; GFX11-NEXT: .LBB23_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v8bf16_to_v4i32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-TRUE16-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v4, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v9, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v10, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v6, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v1, v12, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v10.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v5.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB23_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB23_2
+; GFX11-TRUE16-NEXT: .LBB23_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v8bf16_to_v4i32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-FAKE16-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s3, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s4, 0, s2
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v3
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v3, v7, v9 :: v_dual_add_nc_u32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v11, v9
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v5, v5, v13 :: v_dual_add_nc_u32 v10, v10, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v10, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v6, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v7, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v4, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v7, 16, v8
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB23_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB23_2
+; GFX11-FAKE16-NEXT: .LBB23_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7531,108 +7629,206 @@ define inreg <4 x float> @bitcast_v8bf16_to_v4f32_scalar(<8 x bfloat> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v8bf16_to_v4f32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s16, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
-; GFX11-NEXT: .LBB47_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s4, s3, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s3
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
-; GFX11-NEXT: s_pack_lh_b32_b16 s4, 0, s2
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v3
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v3, v7, v9 :: v_dual_add_nc_u32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v7, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v5
-; GFX11-NEXT: v_bfe_u32 v5, v8, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v4
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v11, v9, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v11, v9
-; GFX11-NEXT: v_bfe_u32 v10, v3, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v5, v5, v13 :: v_dual_add_nc_u32 v10, v10, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v10, v14, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v2, v6, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v7, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v3, v1, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v1, v4, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_lshl_or_b32 v0, v7, 16, v8
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
-; GFX11-NEXT: .LBB47_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v8bf16_to_v4f32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v4, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v9, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v10, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v6, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v1, v12, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v10.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v5.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB47_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB47_2
+; GFX11-TRUE16-NEXT: .LBB47_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v8bf16_to_v4f32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-FAKE16-NEXT: .LBB47_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s3, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s4, 0, s2
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v3
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v3, v7, v9 :: v_dual_add_nc_u32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v11, v9
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v5, v5, v13 :: v_dual_add_nc_u32 v10, v10, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v10, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v6, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v7, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v4, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v7, 16, v8
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB47_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB47_2
+; GFX11-FAKE16-NEXT: .LBB47_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -11622,108 +11818,206 @@ define inreg <2 x i64> @bitcast_v8bf16_to_v2i64_scalar(<8 x bfloat> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v8bf16_to_v2i64_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s16, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB67_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB67_4
-; GFX11-NEXT: .LBB67_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s4, s3, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s3
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
-; GFX11-NEXT: s_pack_lh_b32_b16 s4, 0, s2
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v3
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v3, v7, v9 :: v_dual_add_nc_u32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v7, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v5
-; GFX11-NEXT: v_bfe_u32 v5, v8, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v4
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v11, v9, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v11, v9
-; GFX11-NEXT: v_bfe_u32 v10, v3, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v5, v5, v13 :: v_dual_add_nc_u32 v10, v10, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v10, v14, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v2, v6, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v7, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v3, v1, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v1, v4, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_lshl_or_b32 v0, v7, 16, v8
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB67_3:
-; GFX11-NEXT: s_branch .LBB67_2
-; GFX11-NEXT: .LBB67_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v8bf16_to_v2i64_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX11-TRUE16-NEXT: .LBB67_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v4, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v9, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v10, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v6, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v1, v12, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v10.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v5.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB67_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB67_2
+; GFX11-TRUE16-NEXT: .LBB67_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v8bf16_to_v2i64_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX11-FAKE16-NEXT: .LBB67_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s3, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s4, 0, s2
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v3
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v3, v7, v9 :: v_dual_add_nc_u32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v11, v9
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v5, v5, v13 :: v_dual_add_nc_u32 v10, v10, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v10, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v6, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v7, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v4, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v7, 16, v8
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB67_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB67_2
+; GFX11-FAKE16-NEXT: .LBB67_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -15292,108 +15586,206 @@ define inreg <2 x double> @bitcast_v8bf16_to_v2f64_scalar(<8 x bfloat> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v8bf16_to_v2f64_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s16, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB83_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB83_4
-; GFX11-NEXT: .LBB83_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s4, s3, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s3
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
-; GFX11-NEXT: s_pack_lh_b32_b16 s4, 0, s2
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v3
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v3, v7, v9 :: v_dual_add_nc_u32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v7, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v5
-; GFX11-NEXT: v_bfe_u32 v5, v8, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v4
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v11, v9, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v11, v9
-; GFX11-NEXT: v_bfe_u32 v10, v3, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v5, v5, v13 :: v_dual_add_nc_u32 v10, v10, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v10, v14, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v2, v6, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v7, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v3, v1, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v1, v4, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_lshl_or_b32 v0, v7, 16, v8
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB83_3:
-; GFX11-NEXT: s_branch .LBB83_2
-; GFX11-NEXT: .LBB83_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v8bf16_to_v2f64_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX11-TRUE16-NEXT: .LBB83_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v4, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v9, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v10, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v6, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v1, v12, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v10.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v5.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB83_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB83_2
+; GFX11-TRUE16-NEXT: .LBB83_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v8bf16_to_v2f64_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX11-FAKE16-NEXT: .LBB83_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s3, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s4, 0, s2
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v3
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v3, v7, v9 :: v_dual_add_nc_u32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v11, v9
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v5, v5, v13 :: v_dual_add_nc_u32 v10, v10, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v10, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v6, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v7, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v4, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v7, 16, v8
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB83_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB83_2
+; GFX11-FAKE16-NEXT: .LBB83_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -18154,83 +18546,75 @@ define <8 x i16> @bitcast_v8bf16_to_v8i16(<8 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB94_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v0.l
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
-; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v0, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_add3_u32 v9, v9, v0, 0x7fff
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v4
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v1.l
; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v5
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v6, 16, 1
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v7, 0x40c00000, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v8, 0x40c00000, v4
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v6, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v12, v7, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v1, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v5, v5, v7 :: v_dual_add_f32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v11, v8, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v0
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-TRUE16-NEXT: v_add3_u32 v12, v12, v7, 0x7fff
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
-; GFX11-TRUE16-NEXT: v_add3_u32 v8, v11, v1, 0x7fff
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v1, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v9, v9, v0, 0x7fff
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v9, v10, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v7, 0x40c00000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v5.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v6, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v7, v8 :: v_dual_and_b32 v2, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 0x40c00000, v4 :: v_dual_add_f32 v2, 0x40c00000, v2
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v3.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v12, v13, vcc_lo
-; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v7, 16, 1
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v7
-; GFX11-TRUE16-NEXT: v_bfe_u32 v12, v2, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-TRUE16-NEXT: v_add3_u32 v9, v9, v7, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v13, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v6
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v7, v9, v11, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v9, v12, v2, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v11, v13, v4, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v4, v11, v12 :: v_dual_and_b32 v3, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v6.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v10, v9, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v2, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add3_u32 v9, v10, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v3, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-TRUE16-NEXT: v_add3_u32 v13, v14, v3, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, 0x400000, v3
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v13, v14, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add3_u32 v10, v11, v4, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v10, v11, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v9, v15, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v8, v10, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v7
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v0.h
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v0, 16, v3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v1.h
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v1, 16, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v7, 16, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v4, 16, v6
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v9, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v7.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v8, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v4.h
; GFX11-TRUE16-NEXT: .LBB94_2: ; %end
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -18580,104 +18964,191 @@ define inreg <8 x i16> @bitcast_v8bf16_to_v8i16_scalar(<8 x bfloat> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v8bf16_to_v8i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s16, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB95_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB95_4
-; GFX11-NEXT: .LBB95_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s4, 0, s0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
-; GFX11-NEXT: s_pack_lh_b32_b16 s4, 0, s1
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s0, s1, 16
-; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v0
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v1
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v5, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v6, v2
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_dual_cndmask_b32 v0, v3, v7 :: v_dual_add_nc_u32 v5, v5, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v5, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s2, 16
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s3, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v6, v9, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v10, v4
-; GFX11-NEXT: v_bfe_u32 v10, v7, 16, 1
-; GFX11-NEXT: v_bfe_u32 v12, v9, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v9
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v10, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v12, v9
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v3
-; GFX11-NEXT: v_bfe_u32 v3, v5, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v5
-; GFX11-NEXT: v_bfe_u32 v8, v11, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v13, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v10, v14, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v3, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v1
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v5, v4
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v3, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_or_b32 v1, 0xffff0000, v6, v8
-; GFX11-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v9
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB95_3:
-; GFX11-NEXT: s_branch .LBB95_2
-; GFX11-NEXT: .LBB95_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v8bf16_to_v8i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX11-TRUE16-NEXT: .LBB95_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s0, 0, s2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v3, v7 :: v_dual_add_nc_u32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s2, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v10, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v5, v5, v8 :: v_dual_add_nc_u32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v5.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v6, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s3, 16
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s0, 0, s3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v2, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v12, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v10, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, v12, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v11, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v2, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v11
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v4.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v9, v10, v14, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v6.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v8, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v9.h
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB95_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB95_2
+; GFX11-TRUE16-NEXT: .LBB95_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v8bf16_to_v8i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX11-FAKE16-NEXT: .LBB95_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s4, 0, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s4, 0, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v6, v2
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v0, v3, v7 :: v_dual_add_nc_u32 v5, v5, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v5, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s2, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s3, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v6, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v10, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v9
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v10, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v12, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v5, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v10, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v3, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v5, v4
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v3, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v6, v8
+; GFX11-FAKE16-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v9
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB95_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB95_2
+; GFX11-FAKE16-NEXT: .LBB95_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -21477,112 +21948,210 @@ define inreg <8 x half> @bitcast_v8bf16_to_v8f16_scalar(<8 x bfloat> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v8bf16_to_v8f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s16, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB103_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB103_4
-; GFX11-NEXT: .LBB103_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s4, 0, s0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
-; GFX11-NEXT: s_pack_lh_b32_b16 s4, 0, s1
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s0, s1, 16
-; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v0
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v1
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v5, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v6, v2
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_dual_cndmask_b32 v0, v3, v7 :: v_dual_add_nc_u32 v5, v5, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v5, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s2, 16
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s3, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v6, v9, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v10, v4
-; GFX11-NEXT: v_bfe_u32 v10, v7, 16, 1
-; GFX11-NEXT: v_bfe_u32 v12, v9, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v9
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v10, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v12, v9
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v3
-; GFX11-NEXT: v_bfe_u32 v3, v5, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v5
-; GFX11-NEXT: v_bfe_u32 v8, v11, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v13, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v10, v14, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v11
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v2, v8, 16, v4
-; GFX11-NEXT: v_lshl_or_b32 v0, v0, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v3, v3, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v1, v6, 16, v7
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB103_3:
-; GFX11-NEXT: s_branch .LBB103_2
-; GFX11-NEXT: .LBB103_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v8bf16_to_v8f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX11-TRUE16-NEXT: .LBB103_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v2
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s0, 0, s2
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s1, 0, s3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v3, v7 :: v_dual_add_nc_u32 v5, v5, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s2, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v5, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v10, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v7, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s3, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v12, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v3, v5, v11 :: v_dual_add_nc_u32 v10, v10, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v12, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v8.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v6.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v9, v10, v11 :: v_dual_add_nc_u32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v9
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v4, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v5, v13, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v9.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v7.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB103_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB103_2
+; GFX11-TRUE16-NEXT: .LBB103_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v8bf16_to_v8f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX11-FAKE16-NEXT: .LBB103_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s4, 0, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s4, 0, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v6, v2
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v0, v3, v7 :: v_dual_add_nc_u32 v5, v5, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v5, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s2, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s3, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v6, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v10, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v9
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v10, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v12, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v5, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v11, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v10, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v8, 16, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v0, 16, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v3, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v6, 16, v7
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB103_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB103_2
+; GFX11-FAKE16-NEXT: .LBB103_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -24345,152 +24914,299 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v12, v16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v8bf16_to_v16i8_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s16, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB109_3
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s11, s3, 24
-; GFX11-NEXT: s_lshr_b32 s18, s3, 16
-; GFX11-NEXT: s_lshr_b32 s14, s3, 8
-; GFX11-NEXT: s_lshr_b32 s16, s2, 16
-; GFX11-NEXT: s_lshr_b32 s15, s2, 8
-; GFX11-NEXT: s_lshr_b32 s9, s1, 24
-; GFX11-NEXT: s_lshr_b32 s17, s1, 16
-; GFX11-NEXT: s_lshr_b32 s10, s1, 8
-; GFX11-NEXT: s_lshr_b32 s13, s0, 16
-; GFX11-NEXT: s_lshr_b32 s12, s0, 8
-; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB109_4
-; GFX11-NEXT: .LBB109_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s4, s1, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s1, 0, s1
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
-; GFX11-NEXT: s_pack_lh_b32_b16 s4, 0, s0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: s_lshl_b32 s0, s3, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s1, 0, s3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v8 :: v_dual_add_nc_u32 v7, v7, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v17, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v7, v9, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s2, 16
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s2
-; GFX11-NEXT: v_bfe_u32 v10, v7, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v7
-; GFX11-NEXT: v_bfe_u32 v12, v9, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v10, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v2
-; GFX11-NEXT: v_bfe_u32 v2, v3, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v12, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v3
-; GFX11-NEXT: v_bfe_u32 v8, v11, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v5, v13, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v10, v14, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v11
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v16
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v2, v6, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v1, v4, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v10, v14, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v15, 24, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 8, v10
-; GFX11-NEXT: v_lshl_or_b32 v9, v3, 16, v7
-; GFX11-NEXT: v_lshrrev_b64 v[3:4], 24, v[1:2]
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 24, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX11-NEXT: v_lshrrev_b64 v[11:12], 24, v[9:10]
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 8, v9
-; GFX11-NEXT: s_branch .LBB109_5
-; GFX11-NEXT: .LBB109_3:
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr17
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr16
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr18
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: s_branch .LBB109_2
-; GFX11-NEXT: .LBB109_4:
-; GFX11-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v17, s1
-; GFX11-NEXT: v_dual_mov_b32 v16, s3 :: v_dual_mov_b32 v9, s15
-; GFX11-NEXT: v_dual_mov_b32 v14, s18 :: v_dual_mov_b32 v15, s11
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v13, s14
-; GFX11-NEXT: v_dual_mov_b32 v6, s17 :: v_dual_mov_b32 v1, s12
-; GFX11-NEXT: v_dual_mov_b32 v10, s16 :: v_dual_mov_b32 v7, s9
-; GFX11-NEXT: v_dual_mov_b32 v2, s13 :: v_dual_mov_b32 v5, s10
-; GFX11-NEXT: v_mov_b32_e32 v11, s6
-; GFX11-NEXT: v_mov_b32_e32 v3, s4
-; GFX11-NEXT: .LBB109_5: ; %end
-; GFX11-NEXT: v_mov_b32_e32 v4, v17
-; GFX11-NEXT: v_mov_b32_e32 v12, v16
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v8bf16_to_v16i8_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s8, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB109_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s3, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s18, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s3, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s16, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s2, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s1, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s17, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s1, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s0, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s0, 8
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB109_4
+; GFX11-TRUE16-NEXT: .LBB109_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s1, 16
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s1, 0, s1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s1, 0, s0
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s3
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v7
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s2, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s3, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v10, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v11, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v17, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v3, v2 :: v_dual_add_nc_u32 v1, v8, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s0, 0, s2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v17.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v6.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v14.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v16, 16, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v7, v11, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v16.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v7
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v5, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 24, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 24, v10
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v8.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v13, 8, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 8, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v3.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[3:4], 24, v[1:2]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[11:12], 24, v[9:10]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 8, v9
+; GFX11-TRUE16-NEXT: s_branch .LBB109_5
+; GFX11-TRUE16-NEXT: .LBB109_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr17
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr16
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr18
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-TRUE16-NEXT: s_branch .LBB109_2
+; GFX11-TRUE16-NEXT: .LBB109_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v17, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s3 :: v_dual_mov_b32 v9, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s18 :: v_dual_mov_b32 v15, s11
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v13, s14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s17 :: v_dual_mov_b32 v1, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s16 :: v_dual_mov_b32 v7, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s13 :: v_dual_mov_b32 v5, s10
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v11, s6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-TRUE16-NEXT: .LBB109_5: ; %end
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v17
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v12, v16
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v8bf16_to_v16i8_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s8, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB109_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s3, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s18, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s3, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s16, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s2, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s1, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s17, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s1, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s0, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s0, 8
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB109_4
+; GFX11-FAKE16-NEXT: .LBB109_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s1, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s1, 0, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s4, 0, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s3, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s1, 0, s3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v0, v2, v8 :: v_dual_add_nc_u32 v7, v7, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v17, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v7, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s2, 16
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v10, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v12, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v5, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v10, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v6, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v4, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v14, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 8, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 24, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 8, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v3, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[3:4], 24, v[1:2]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 24, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[11:12], 24, v[9:10]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 8, v9
+; GFX11-FAKE16-NEXT: s_branch .LBB109_5
+; GFX11-FAKE16-NEXT: .LBB109_3:
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr17
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr16
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr18
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-FAKE16-NEXT: s_branch .LBB109_2
+; GFX11-FAKE16-NEXT: .LBB109_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v17, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s3 :: v_dual_mov_b32 v9, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s18 :: v_dual_mov_b32 v15, s11
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v13, s14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s17 :: v_dual_mov_b32 v1, s12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s16 :: v_dual_mov_b32 v7, s9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s13 :: v_dual_mov_b32 v5, s10
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v11, s6
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-FAKE16-NEXT: .LBB109_5: ; %end
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v17
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v12, v16
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll
index 0a73571..01e397d 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll
@@ -4485,203 +4485,384 @@ define inreg <8 x i32> @bitcast_v16bf16_to_v8i32_scalar(<16 x bfloat> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v16bf16_to_v8i32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_mov_b32 s7, s19
-; GFX11-NEXT: s_mov_b32 s6, s18
-; GFX11-NEXT: s_mov_b32 s5, s17
-; GFX11-NEXT: s_mov_b32 s4, s16
-; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
-; GFX11-NEXT: .LBB23_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s8, s7, 16
-; GFX11-NEXT: s_and_b32 s7, s7, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s7
-; GFX11-NEXT: s_and_b32 s8, s6, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s8
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: s_and_b32 s7, s5, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s7
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_bfe_u32 v10, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s5
-; GFX11-NEXT: v_bfe_u32 v11, v6, 16, 1
-; GFX11-NEXT: s_and_b32 s5, s4, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s4, s4, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v8, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v3
-; GFX11-NEXT: v_bfe_u32 v9, v7, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v3, v8, v4 :: v_dual_add_nc_u32 v4, v9, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v11, v6
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v10 :: v_dual_add_nc_u32 v5, 0x7fff, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s5
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v9, v10, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v8, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_and_b32 s4, s3, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v9, v10
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s3
-; GFX11-NEXT: s_and_b32 s3, s2, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v6, v8
-; GFX11-NEXT: v_lshl_or_b32 v6, v3, 16, v2
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v10
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v8
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
-; GFX11-NEXT: v_bfe_u32 v3, v9, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_and_b32 v4, 0xffff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v10, v8, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v5, v5, 16, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s4
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v2, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s2
-; GFX11-NEXT: s_and_b32 s2, s1, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v9
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v11, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v10
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_bfe_u32 v11, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_bfe_u32 v16, v14, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_dual_cndmask_b32 v8, v9, v10 :: v_dual_add_nc_u32 v9, v11, v4
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s2
-; GFX11-NEXT: v_add_nc_u32_e32 v16, v16, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v12, v10, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v10
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v9, v11, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v11, v13, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v12, v10
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v15, v9, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v11, v13
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v15, v15, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
-; GFX11-NEXT: v_cndmask_b32_e32 v11, v11, v18, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v15, v19, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v9
-; GFX11-NEXT: v_cndmask_b32_e32 v10, v12, v17, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; GFX11-NEXT: v_cndmask_b32_e32 v12, v13, v16, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v1, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v3, v2, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v1, v10, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v2, v8, 16, v13
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v0, v12, 16, v9
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: s_branch .LBB23_2
-; GFX11-NEXT: .LBB23_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v16bf16_to_v8i32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s7, s19
+; GFX11-TRUE16-NEXT: s_mov_b32 s6, s18
+; GFX11-TRUE16-NEXT: s_mov_b32 s5, s17
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, s16
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s8, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-TRUE16-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s7, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s6, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s6, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s5, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v6 :: v_dual_add_nc_u32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v9, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v0.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v6, v8
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s4, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v3.l
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s4, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v9
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v1.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v11, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v3, v8 :: v_dual_add_nc_u32 v3, v9, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v11, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v10, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v8, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v10
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v0.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v8, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v13, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v13, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, v14, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v0.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v10, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v9
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v9, v11, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v8, v10, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v1, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v8
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v14.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v10
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v9.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB23_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB23_2
+; GFX11-TRUE16-NEXT: .LBB23_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v16bf16_to_v8i32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_mov_b32 s7, s19
+; GFX11-FAKE16-NEXT: s_mov_b32 s6, s18
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, s17
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, s16
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s8, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-FAKE16-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s7, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s7, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s6, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s6, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s5, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s4, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s4, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v3, v8, v4 :: v_dual_add_nc_u32 v4, v9, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v11, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v2, v10 :: v_dual_add_nc_u32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v9, v10
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v6, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v3, 16, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v8
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_and_b32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v5, 16, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v14, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v8, v9, v10 :: v_dual_add_nc_u32 v9, v11, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, v16, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v9, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v12, v10
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v15, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v11, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, v15, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v11, v18, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v15, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v12, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v13, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v10, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v8, 16, v13
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v12, 16, v9
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB23_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB23_2
+; GFX11-FAKE16-NEXT: .LBB23_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -11456,203 +11637,384 @@ define inreg <8 x float> @bitcast_v16bf16_to_v8f32_scalar(<16 x bfloat> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v16bf16_to_v8f32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_mov_b32 s7, s19
-; GFX11-NEXT: s_mov_b32 s6, s18
-; GFX11-NEXT: s_mov_b32 s5, s17
-; GFX11-NEXT: s_mov_b32 s4, s16
-; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
-; GFX11-NEXT: .LBB47_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s8, s7, 16
-; GFX11-NEXT: s_and_b32 s7, s7, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s7
-; GFX11-NEXT: s_and_b32 s8, s6, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s8
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: s_and_b32 s7, s5, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s7
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_bfe_u32 v10, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s5
-; GFX11-NEXT: v_bfe_u32 v11, v6, 16, 1
-; GFX11-NEXT: s_and_b32 s5, s4, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s4, s4, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v8, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v3
-; GFX11-NEXT: v_bfe_u32 v9, v7, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v3, v8, v4 :: v_dual_add_nc_u32 v4, v9, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v11, v6
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v10 :: v_dual_add_nc_u32 v5, 0x7fff, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s5
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v9, v10, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v8, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_and_b32 s4, s3, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v9, v10
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s3
-; GFX11-NEXT: s_and_b32 s3, s2, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v6, v8
-; GFX11-NEXT: v_lshl_or_b32 v6, v3, 16, v2
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v10
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v8
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
-; GFX11-NEXT: v_bfe_u32 v3, v9, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_and_b32 v4, 0xffff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v10, v8, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v5, v5, 16, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s4
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v2, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s2
-; GFX11-NEXT: s_and_b32 s2, s1, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v9
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v11, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v10
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_bfe_u32 v11, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_bfe_u32 v16, v14, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_dual_cndmask_b32 v8, v9, v10 :: v_dual_add_nc_u32 v9, v11, v4
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s2
-; GFX11-NEXT: v_add_nc_u32_e32 v16, v16, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v12, v10, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v10
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v9, v11, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v11, v13, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v12, v10
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v15, v9, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v11, v13
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v15, v15, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
-; GFX11-NEXT: v_cndmask_b32_e32 v11, v11, v18, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v15, v19, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v9
-; GFX11-NEXT: v_cndmask_b32_e32 v10, v12, v17, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; GFX11-NEXT: v_cndmask_b32_e32 v12, v13, v16, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v1, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v3, v2, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v1, v10, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v2, v8, 16, v13
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v0, v12, 16, v9
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
-; GFX11-NEXT: .LBB47_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v16bf16_to_v8f32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s7, s19
+; GFX11-TRUE16-NEXT: s_mov_b32 s6, s18
+; GFX11-TRUE16-NEXT: s_mov_b32 s5, s17
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, s16
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s8, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s7, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s6, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s6, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s5, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v6 :: v_dual_add_nc_u32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v9, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v0.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v6, v8
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s4, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v3.l
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s4, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v9
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v1.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v11, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v3, v8 :: v_dual_add_nc_u32 v3, v9, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v11, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v10, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v8, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v10
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v0.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v8, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v13, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v13, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, v14, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v0.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v10, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v9
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v9, v11, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v8, v10, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v1, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v8
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v14.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v10
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v9.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB47_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB47_2
+; GFX11-TRUE16-NEXT: .LBB47_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v16bf16_to_v8f32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_mov_b32 s7, s19
+; GFX11-FAKE16-NEXT: s_mov_b32 s6, s18
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, s17
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, s16
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s8, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-FAKE16-NEXT: .LBB47_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s7, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s7, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s6, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s6, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s5, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s4, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s4, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v3, v8, v4 :: v_dual_add_nc_u32 v4, v9, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v11, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v2, v10 :: v_dual_add_nc_u32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v9, v10
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v6, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v3, 16, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v8
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_and_b32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v5, 16, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v14, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v8, v9, v10 :: v_dual_add_nc_u32 v9, v11, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, v16, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v9, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v12, v10
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v15, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v11, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, v15, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v11, v18, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v15, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v12, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v13, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v10, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v8, 16, v13
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v12, 16, v9
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB47_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB47_2
+; GFX11-FAKE16-NEXT: .LBB47_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -17995,203 +18357,384 @@ define inreg <4 x i64> @bitcast_v16bf16_to_v4i64_scalar(<16 x bfloat> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v16bf16_to_v4i64_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_mov_b32 s7, s19
-; GFX11-NEXT: s_mov_b32 s6, s18
-; GFX11-NEXT: s_mov_b32 s5, s17
-; GFX11-NEXT: s_mov_b32 s4, s16
-; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB67_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB67_4
-; GFX11-NEXT: .LBB67_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s8, s7, 16
-; GFX11-NEXT: s_and_b32 s7, s7, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s7
-; GFX11-NEXT: s_and_b32 s8, s6, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s8
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: s_and_b32 s7, s5, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s7
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_bfe_u32 v10, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s5
-; GFX11-NEXT: v_bfe_u32 v11, v6, 16, 1
-; GFX11-NEXT: s_and_b32 s5, s4, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s4, s4, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v8, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v3
-; GFX11-NEXT: v_bfe_u32 v9, v7, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v3, v8, v4 :: v_dual_add_nc_u32 v4, v9, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v11, v6
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v10 :: v_dual_add_nc_u32 v5, 0x7fff, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s5
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v9, v10, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v8, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_and_b32 s4, s3, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v9, v10
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s3
-; GFX11-NEXT: s_and_b32 s3, s2, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v6, v8
-; GFX11-NEXT: v_lshl_or_b32 v6, v3, 16, v2
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v10
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v8
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
-; GFX11-NEXT: v_bfe_u32 v3, v9, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_and_b32 v4, 0xffff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v10, v8, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v5, v5, 16, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s4
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v2, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s2
-; GFX11-NEXT: s_and_b32 s2, s1, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v9
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v11, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v10
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_bfe_u32 v11, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_bfe_u32 v16, v14, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_dual_cndmask_b32 v8, v9, v10 :: v_dual_add_nc_u32 v9, v11, v4
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s2
-; GFX11-NEXT: v_add_nc_u32_e32 v16, v16, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v12, v10, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v10
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v9, v11, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v11, v13, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v12, v10
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v15, v9, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v11, v13
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v15, v15, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
-; GFX11-NEXT: v_cndmask_b32_e32 v11, v11, v18, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v15, v19, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v9
-; GFX11-NEXT: v_cndmask_b32_e32 v10, v12, v17, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; GFX11-NEXT: v_cndmask_b32_e32 v12, v13, v16, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v1, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v3, v2, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v1, v10, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v2, v8, 16, v13
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v0, v12, 16, v9
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB67_3:
-; GFX11-NEXT: s_branch .LBB67_2
-; GFX11-NEXT: .LBB67_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v16bf16_to_v4i64_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s7, s19
+; GFX11-TRUE16-NEXT: s_mov_b32 s6, s18
+; GFX11-TRUE16-NEXT: s_mov_b32 s5, s17
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, s16
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s8, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX11-TRUE16-NEXT: .LBB67_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s7, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s6, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s6, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s5, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v6 :: v_dual_add_nc_u32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v9, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v0.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v6, v8
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s4, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v3.l
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s4, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v9
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v1.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v11, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v3, v8 :: v_dual_add_nc_u32 v3, v9, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v11, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v10, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v8, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v10
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v0.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v8, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v13, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v13, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, v14, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v0.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v10, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v9
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v9, v11, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v8, v10, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v1, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v8
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v14.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v10
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v9.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB67_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB67_2
+; GFX11-TRUE16-NEXT: .LBB67_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v16bf16_to_v4i64_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_mov_b32 s7, s19
+; GFX11-FAKE16-NEXT: s_mov_b32 s6, s18
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, s17
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, s16
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s8, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX11-FAKE16-NEXT: .LBB67_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s7, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s7, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s6, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s6, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s5, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s4, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s4, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v3, v8, v4 :: v_dual_add_nc_u32 v4, v9, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v11, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v2, v10 :: v_dual_add_nc_u32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v9, v10
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v6, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v3, 16, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v8
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_and_b32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v5, 16, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v14, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v8, v9, v10 :: v_dual_add_nc_u32 v9, v11, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, v16, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v9, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v12, v10
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v15, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v11, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, v15, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v11, v18, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v15, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v12, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v13, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v10, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v8, 16, v13
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v12, 16, v9
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB67_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB67_2
+; GFX11-FAKE16-NEXT: .LBB67_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -23982,203 +24525,384 @@ define inreg <4 x double> @bitcast_v16bf16_to_v4f64_scalar(<16 x bfloat> inreg %
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v16bf16_to_v4f64_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_mov_b32 s7, s19
-; GFX11-NEXT: s_mov_b32 s6, s18
-; GFX11-NEXT: s_mov_b32 s5, s17
-; GFX11-NEXT: s_mov_b32 s4, s16
-; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB83_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB83_4
-; GFX11-NEXT: .LBB83_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s8, s7, 16
-; GFX11-NEXT: s_and_b32 s7, s7, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s7
-; GFX11-NEXT: s_and_b32 s8, s6, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s8
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: s_and_b32 s7, s5, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s7
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_bfe_u32 v10, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s5
-; GFX11-NEXT: v_bfe_u32 v11, v6, 16, 1
-; GFX11-NEXT: s_and_b32 s5, s4, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s4, s4, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v8, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v3
-; GFX11-NEXT: v_bfe_u32 v9, v7, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v3, v8, v4 :: v_dual_add_nc_u32 v4, v9, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v11, v6
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v10 :: v_dual_add_nc_u32 v5, 0x7fff, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s5
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v9, v10, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v8, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_and_b32 s4, s3, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v9, v10
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s3
-; GFX11-NEXT: s_and_b32 s3, s2, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v6, v8
-; GFX11-NEXT: v_lshl_or_b32 v6, v3, 16, v2
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v10
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v8
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
-; GFX11-NEXT: v_bfe_u32 v3, v9, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_and_b32 v4, 0xffff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v10, v8, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v5, v5, 16, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s4
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v2, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s2
-; GFX11-NEXT: s_and_b32 s2, s1, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v9
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v11, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v10
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_bfe_u32 v11, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_bfe_u32 v16, v14, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_dual_cndmask_b32 v8, v9, v10 :: v_dual_add_nc_u32 v9, v11, v4
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s2
-; GFX11-NEXT: v_add_nc_u32_e32 v16, v16, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v12, v10, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v10
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v9, v11, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v11, v13, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v12, v10
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v15, v9, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v11, v13
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v15, v15, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
-; GFX11-NEXT: v_cndmask_b32_e32 v11, v11, v18, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v15, v19, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v9
-; GFX11-NEXT: v_cndmask_b32_e32 v10, v12, v17, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; GFX11-NEXT: v_cndmask_b32_e32 v12, v13, v16, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v1, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v3, v2, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v1, v10, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v2, v8, 16, v13
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v0, v12, 16, v9
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB83_3:
-; GFX11-NEXT: s_branch .LBB83_2
-; GFX11-NEXT: .LBB83_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v16bf16_to_v4f64_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s7, s19
+; GFX11-TRUE16-NEXT: s_mov_b32 s6, s18
+; GFX11-TRUE16-NEXT: s_mov_b32 s5, s17
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, s16
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s8, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX11-TRUE16-NEXT: .LBB83_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s7, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s6, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s6, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s5, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v6 :: v_dual_add_nc_u32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v9, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v0.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v6, v8
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s4, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v3.l
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s4, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v9
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v1.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v11, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v3, v8 :: v_dual_add_nc_u32 v3, v9, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v11, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v10, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v8, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v10
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v0.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v8, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v13, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v13, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, v14, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v0.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v10, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v9
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v9, v11, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v8, v10, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v1, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v8
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v14.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v10
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v9.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB83_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB83_2
+; GFX11-TRUE16-NEXT: .LBB83_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v16bf16_to_v4f64_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_mov_b32 s7, s19
+; GFX11-FAKE16-NEXT: s_mov_b32 s6, s18
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, s17
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, s16
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s8, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX11-FAKE16-NEXT: .LBB83_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s7, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s7, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s6, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s6, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s5, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s4, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s4, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v3, v8, v4 :: v_dual_add_nc_u32 v4, v9, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v11, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v2, v10 :: v_dual_add_nc_u32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v9, v10
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v6, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v3, 16, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v8
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_and_b32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v5, 16, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v14, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v8, v9, v10 :: v_dual_add_nc_u32 v9, v11, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, v16, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v9, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v12, v10
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v15, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v11, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, v15, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v11, v18, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v15, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v12, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v13, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v10, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v8, 16, v13
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v12, 16, v9
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB83_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB83_2
+; GFX11-FAKE16-NEXT: .LBB83_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -28722,13 +29446,10 @@ define <16 x i16> @bitcast_v16bf16_to_v16i16(<16 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB94_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v1
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v8, 16, v0
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 0x40c00000, v9 :: v_dual_add_f32 v8, 0x40c00000, v8
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v6
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 0x40c00000, v9 :: v_dual_lshlrev_b32 v8, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v8, 0x40c00000, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v9, 16, 1
; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v8, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v8
@@ -28736,142 +29457,128 @@ define <16 x i16> @bitcast_v16bf16_to_v16i16(<16 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v9
; GFX11-TRUE16-NEXT: v_add3_u32 v14, v14, v9, 0x7fff
; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v8, 0x7fff
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v22, v6, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, 0x400000, v6
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v8, v11, v12, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v8, v11, v12 :: v_dual_and_b32 v1, 0xffff0000, v1
; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 0x40c00000, v1 :: v_dual_lshlrev_b32 v10, 16, v2
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v12, 16, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v1, 16, 1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_dual_add_f32 v12, 0x40c00000, v12 :: v_dual_add_f32 v3, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v1, 0x7fff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_bfe_u32 v13, v0, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v0
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_add3_u32 v13, v13, v0, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v13, v15, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v1
; GFX11-TRUE16-NEXT: v_bfe_u32 v15, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v8.h
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v9, v14, v16, vcc_lo
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v16, 16, v4
; GFX11-TRUE16-NEXT: v_add_f32_e32 v10, 0x40c00000, v10
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v9
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v10, 16, 1
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v11, v13, vcc_lo
; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v10
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v9.h
; GFX11-TRUE16-NEXT: v_add3_u32 v11, v14, v10, 0x7fff
; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v2, 16, 1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v11, v13, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add3_u32 v11, v14, v2, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v2
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
; GFX11-TRUE16-NEXT: v_add3_u32 v14, v15, v12, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v11, v13, vcc_lo
; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v3, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v10.h
; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v3, 0x7fff
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v15, 16, v5
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v14, 16, v5
; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v11, v15, vcc_lo
-; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v14, 0x40c00000, v14 :: v_dual_add_f32 v5, 0x40c00000, v5
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v4, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v14
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v20, v5, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v11, v18, vcc_lo
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v18, 16, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v18, 0x40c00000, v18 :: v_dual_add_f32 v7, 0x40c00000, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v11, v14, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v11, 0x40c00000, v15
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
; GFX11-TRUE16-NEXT: v_add_f32_e32 v13, 0x40c00000, v16
-; GFX11-TRUE16-NEXT: v_bfe_u32 v23, v18, 16, 1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v24, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v12.h
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_bfe_u32 v16, v13, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v13
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add3_u32 v15, v16, v13, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v16, v14, 16, 1
-; GFX11-TRUE16-NEXT: v_add3_u32 v16, v16, v14, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v14, 0x40c00000, v21 :: v_dual_cndmask_b32 v11, v16, v19
-; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v14, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v14
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v11, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-TRUE16-NEXT: v_add3_u32 v14, v16, v13, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v16, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v13, v14, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add3_u32 v14, v16, v4, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v16, v17, v11, 0x7fff
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v4, v14, v15 :: v_dual_lshlrev_b32 v17, 16, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-TRUE16-NEXT: v_add3_u32 v15, v19, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v13.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v11, v16, v18 :: v_dual_lshlrev_b32 v18, 16, v7
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v14, 0x40c00000, v17
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v7, 0x40c00000, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v14, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v15, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v15, 0x40c00000, v18
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-TRUE16-NEXT: v_add3_u32 v16, v20, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, 0x400000, v5
-; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v14, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v14, v19, v21, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v19, v22, v6, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v21, v23, v18, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v18
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-TRUE16-NEXT: v_add3_u32 v23, v24, v7, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v7
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v18, v21, v22, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v7, v23, v24, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v16, v17, v14, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v14
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v15, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v11.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v14, v16, v17, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v16, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add3_u32 v16, v16, v7, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v18, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, 0x400000, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v17, v18, v6, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v18, v19, v15, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v15
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v15, v18, v19, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v7.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v19, v25, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v18
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v6.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v16, v20, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v0, 16, v7
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v5.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v11
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v11, v15, v17, vcc_lo
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v1, 16, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v3.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v2, 16, v5
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v11
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v0.h
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v1, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v0, 16, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v2, 16, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v11, 16, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v12, 16, v8
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v17, v20, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v14.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v7, v16, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v15.h
; GFX11-TRUE16-NEXT: .LBB94_2: ; %end
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -29497,175 +30204,334 @@ define inreg <16 x i16> @bitcast_v16bf16_to_v16i16_scalar(<16 x bfloat> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v16bf16_to_v16i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_mov_b32 s7, s19
-; GFX11-NEXT: s_mov_b32 s6, s18
-; GFX11-NEXT: s_mov_b32 s5, s17
-; GFX11-NEXT: s_mov_b32 s4, s16
-; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB95_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB95_4
-; GFX11-NEXT: .LBB95_2: ; %cmp.true
-; GFX11-NEXT: s_and_b32 s8, s0, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
-; GFX11-NEXT: s_and_b32 s8, s1, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s8
-; GFX11-NEXT: s_lshl_b32 s0, s1, 16
-; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: s_and_b32 s1, s2, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v0
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v2
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: s_lshl_b32 s0, s2, 16
-; GFX11-NEXT: s_and_b32 s1, s5, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v0, v3, v8 :: v_dual_add_nc_u32 v5, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v7, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v8, v6
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v5, v3, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v4, v7
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s3, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: s_lshl_b32 s0, s3, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v10, v4
-; GFX11-NEXT: v_bfe_u32 v12, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v8, v3, v11, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_and_b32 s0, s4, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v12, v5
-; GFX11-NEXT: v_bfe_u32 v12, v7, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s4, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v6, v10, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v12, v7
-; GFX11-NEXT: v_bfe_u32 v14, v10, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v7
-; GFX11-NEXT: s_lshl_b32 s0, s5, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v11, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v14, v10
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v10
-; GFX11-NEXT: v_bfe_u32 v15, v11, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v12, v5, v12, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v15, v11
-; GFX11-NEXT: s_and_b32 s0, s6, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v16, v5, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v13, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v10, v7, v17, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s6, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v16, v16, v5
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s7, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v6, v13
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v11, v14, v18 :: v_dual_add_nc_u32 v14, 0x7fff, v16
-; GFX11-NEXT: v_bfe_u32 v16, v7, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v5
-; GFX11-NEXT: v_add_f32_e64 v19, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v20, v17, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v16, v16, v7
-; GFX11-NEXT: s_and_b32 s0, s7, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v22, v19, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v21, 0x40c00000, s0
-; GFX11-NEXT: v_dual_cndmask_b32 v5, v14, v18 :: v_dual_add_nc_u32 v14, v20, v17
-; GFX11-NEXT: v_or_b32_e32 v23, 0x400000, v17
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v20, v22, v19
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: v_bfe_u32 v18, v21, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
-; GFX11-NEXT: v_or_b32_e32 v24, 0x400000, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v16
-; GFX11-NEXT: v_or_b32_e32 v22, 0x400000, v7
-; GFX11-NEXT: v_cndmask_b32_e32 v14, v14, v23, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v18, v18, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v14
-; GFX11-NEXT: v_cndmask_b32_e32 v19, v20, v24, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v18
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; GFX11-NEXT: v_cndmask_b32_e32 v16, v16, v22, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_and_or_b32 v5, 0xffff0000, v11, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v17, v18, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v17, 16, v19
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v3, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v6, v15, vcc_lo
-; GFX11-NEXT: v_and_or_b32 v6, 0xffff0000, v16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v15, 16, v1
-; GFX11-NEXT: v_and_or_b32 v7, 0xffff0000, v7, v17
-; GFX11-NEXT: v_and_or_b32 v4, 0xffff0000, v13, v10
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v8, v12
-; GFX11-NEXT: v_and_or_b32 v1, 0xffff0000, v9, v14
-; GFX11-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v15
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB95_3:
-; GFX11-NEXT: s_branch .LBB95_2
-; GFX11-NEXT: .LBB95_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v16bf16_to_v16i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s7, s19
+; GFX11-TRUE16-NEXT: s_mov_b32 s6, s18
+; GFX11-TRUE16-NEXT: s_mov_b32 s5, s17
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, s16
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s8, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX11-TRUE16-NEXT: .LBB95_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s1, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s2, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v3, v7 :: v_dual_add_nc_u32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v10, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s2, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s5, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v8, v5, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v8.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v6, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s3, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s3, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v2, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v2, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s4, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v10.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v2, v3, v4 :: v_dual_add_nc_u32 v3, v5, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v12
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s4, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v14, v4, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v11, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v12
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v11
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s5, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v12, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v7
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v15, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v4, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v11, v15
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s6, 0xffff0000
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v12.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v13, v5, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s6, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v14.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v13.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v5, v6, v16 :: v_dual_add_nc_u32 v6, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s7, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v15
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-TRUE16-NEXT: v_bfe_u32 v18, v11, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s7, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v20, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v15, v6, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v18, v11
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v19, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, v20, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v9, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v16, v19, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v11, v6, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, v16, v19
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v19
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v17, v18, v22, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v15.h
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v9, v20, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v11.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v7, v16, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v17.h
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB95_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB95_2
+; GFX11-TRUE16-NEXT: .LBB95_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v16bf16_to_v16i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_mov_b32 s7, s19
+; GFX11-FAKE16-NEXT: s_mov_b32 s6, s18
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, s17
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, s16
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s8, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX11-FAKE16-NEXT: .LBB95_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s1, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s2, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s2, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s5, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v0, v3, v8 :: v_dual_add_nc_u32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v8, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v5, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v4, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s3, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s3, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v10, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v8, v3, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s4, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v12, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s4, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v6, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v12, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v14, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s5, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v14, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v15, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v5, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v15, v11
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s6, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v7, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s6, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, v16, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s7, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v6, v13
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v11, v14, v18 :: v_dual_add_nc_u32 v14, 0x7fff, v16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v19, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v20, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, v16, v7
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s7, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v22, v19, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v21, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v5, v14, v18 :: v_dual_add_nc_u32 v14, v20, v17
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v23, 0x400000, v17
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, v22, v19
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: v_bfe_u32 v18, v21, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v24, 0x400000, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v22, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v14, v14, v23, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, v18, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v14
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v19, v20, v24, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v16, v16, v22, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, 0xffff0000, v11, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v17, v18, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v17, 16, v19
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v3, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v6, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_or_b32 v6, 0xffff0000, v16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1
+; GFX11-FAKE16-NEXT: v_and_or_b32 v7, 0xffff0000, v7, v17
+; GFX11-FAKE16-NEXT: v_and_or_b32 v4, 0xffff0000, v13, v10
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v8, v12
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v9, v14
+; GFX11-FAKE16-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v15
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB95_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB95_2
+; GFX11-FAKE16-NEXT: .LBB95_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -32462,177 +33328,351 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
; GFX9-NEXT: s_branch .LBB99_2
;
-; GFX11-LABEL: bitcast_v32i8_to_v16i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v17, v0
-; GFX11-NEXT: v_dual_mov_b32 v18, v4 :: v_dual_mov_b32 v15, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v14, 8, v1
-; GFX11-NEXT: v_lshlrev_b32_e32 v20, 8, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v19, 8, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v21, 8, v7
-; GFX11-NEXT: v_lshlrev_b32_e32 v9, 8, v9
-; GFX11-NEXT: v_lshlrev_b32_e32 v11, 8, v11
-; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v13
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB99_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v15
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s27, 8
-; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v16
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: v_or_b32_e32 v1, v1, v20
-; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v10
-; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s9
-; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v18
-; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v17
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v8
-; GFX11-NEXT: v_or_b32_e32 v4, v4, v21
-; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v12
-; GFX11-NEXT: v_or_b32_e32 v2, v2, v19
-; GFX11-NEXT: v_or_b32_e32 v5, v5, v11
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: s_and_b32 s11, s28, 0xff
-; GFX11-NEXT: s_lshl_b32 s12, s29, 8
-; GFX11-NEXT: v_or_b32_e32 v6, v6, v9
-; GFX11-NEXT: s_or_b32 s10, s11, s12
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e64 v3, 0xffff, s10
-; GFX11-NEXT: v_or_b32_e32 v7, v7, v13
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v5, v2, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, s6
-; GFX11-NEXT: v_or_b32_e32 v0, v0, v14
-; GFX11-NEXT: v_lshl_or_b32 v6, v6, 16, v22
-; GFX11-NEXT: v_lshl_or_b32 v7, v7, 16, v23
-; GFX11-NEXT: v_mov_b32_e32 v2, s7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v4, v0, 16, v3
-; GFX11-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v3, s8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB99_3
-; GFX11-NEXT: .LBB99_2: ; %cmp.true
-; GFX11-NEXT: s_add_i32 s28, s28, 3
-; GFX11-NEXT: s_lshl_b32 s5, s29, 8
-; GFX11-NEXT: s_and_b32 s4, s28, 0xff
-; GFX11-NEXT: s_add_i32 s24, s24, 3
-; GFX11-NEXT: s_or_b32 s4, s5, s4
-; GFX11-NEXT: s_and_b32 s5, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s25, 8
-; GFX11-NEXT: s_add_i32 s26, s26, 3
-; GFX11-NEXT: s_or_b32 s5, s6, s5
-; GFX11-NEXT: s_and_b32 s6, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s7, s27, 8
-; GFX11-NEXT: s_add_i32 s20, s20, 3
-; GFX11-NEXT: s_or_b32 s6, s7, s6
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_add_i32 s22, s22, 3
-; GFX11-NEXT: s_or_b32 s7, s8, s7
-; GFX11-NEXT: s_and_b32 s8, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s23, 8
-; GFX11-NEXT: s_add_i32 s16, s16, 3
-; GFX11-NEXT: s_or_b32 s8, s9, s8
-; GFX11-NEXT: s_and_b32 s9, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s17, 8
-; GFX11-NEXT: s_add_i32 s18, s18, 3
-; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: s_add_i32 s2, s2, 3
-; GFX11-NEXT: s_or_b32 s9, s10, s9
-; GFX11-NEXT: s_and_b32 s10, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s11, s19, 8
-; GFX11-NEXT: s_and_b32 s0, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s1, 8
-; GFX11-NEXT: s_and_b32 s2, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s3, 8
-; GFX11-NEXT: s_or_b32 s10, s11, s10
-; GFX11-NEXT: s_or_b32 s0, s1, s0
-; GFX11-NEXT: s_or_b32 s1, s3, s2
-; GFX11-NEXT: s_addk_i32 s5, 0x300
-; GFX11-NEXT: s_addk_i32 s6, 0x300
-; GFX11-NEXT: s_addk_i32 s9, 0x300
-; GFX11-NEXT: s_addk_i32 s0, 0x300
-; GFX11-NEXT: s_addk_i32 s1, 0x300
-; GFX11-NEXT: s_addk_i32 s10, 0x300
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 3, v15
-; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s1
-; GFX11-NEXT: s_pack_ll_b32_b16 s1, s9, s10
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v10
-; GFX11-NEXT: s_pack_ll_b32_b16 s3, s5, s6
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v16
-; GFX11-NEXT: s_addk_i32 s7, 0x300
-; GFX11-NEXT: s_addk_i32 s8, 0x300
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v12
-; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 3, v18
-; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v17
-; GFX11-NEXT: s_pack_ll_b32_b16 s2, s7, s8
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 3, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX11-NEXT: v_or_b32_e32 v0, v11, v0
-; GFX11-NEXT: v_or_b32_e32 v2, v21, v2
-; GFX11-NEXT: v_or_b32_e32 v4, v20, v4
-; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX11-NEXT: v_or_b32_e32 v1, v13, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
-; GFX11-NEXT: v_or_b32_e32 v3, v9, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
-; GFX11-NEXT: v_or_b32_e32 v5, v19, v5
-; GFX11-NEXT: v_or_b32_e32 v6, v14, v6
-; GFX11-NEXT: s_addk_i32 s4, 0x300
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x300, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
-; GFX11-NEXT: v_and_b32_e64 v7, 0xffff, s4
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v4, v6, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v5, v5, 16, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v6, v3, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: .LBB99_3: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB99_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX11-NEXT: s_branch .LBB99_2
+; GFX11-TRUE16-LABEL: bitcast_v32i8_to_v16i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v17, v0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v4 :: v_dual_mov_b32 v15, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v14, 8, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v20, 8, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v19, 8, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 8, v7
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v11, 8, v11
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB99_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s17, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s19, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s11, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s12, s27, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s28, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s29, 8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v10
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s9 :: v_dual_and_b32 v1, 0xff, v15
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v17
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v7, v11
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v18
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v1, v20
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v12
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v13
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v3, v21
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v19
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v14
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s7
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v9
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s5
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB99_3
+; GFX11-TRUE16-NEXT: .LBB99_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_add_i32 s28, s28, 3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-TRUE16-NEXT: s_add_i32 s24, s24, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s5, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s25, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s26, s26, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s27, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s20, s20, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s6
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s22, s22, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s16, s16, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s17, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s18, s18, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s0, s0, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s19, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s10
+; GFX11-TRUE16-NEXT: s_or_b32 s0, s1, s0
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s3, s2
+; GFX11-TRUE16-NEXT: s_addk_i32 s9, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s0, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s1, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s10, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v10
+; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s8, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v18
+; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v11, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v13, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v1
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v0.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_and_b32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v9, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v20, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v19, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_add_nc_u32 v3, 0x300, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v3.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_and_b32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v21, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v17
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v1.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_and_b32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v14, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s2
+; GFX11-TRUE16-NEXT: .LBB99_3: ; %end
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB99_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+; GFX11-TRUE16-NEXT: s_branch .LBB99_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v32i8_to_v16i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v17, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, v4 :: v_dual_mov_b32 v15, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v14, 8, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v20, 8, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v19, 8, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v21, 8, v7
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v11, 8, v11
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB99_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s17, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s19, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s27, 8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v16
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v1, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v4, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v2, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v5, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: s_and_b32 s11, s28, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s12, s29, 8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v6, v9
+; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e64 v3, 0xffff, s10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, v7, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v2, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v6, 16, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v7, 16, v23
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, s7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v0, 16, v3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v3, s8
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB99_3
+; GFX11-FAKE16-NEXT: .LBB99_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_add_i32 s28, s28, 3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-FAKE16-NEXT: s_add_i32 s24, s24, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s5, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s25, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s26, s26, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s5
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s27, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s20, s20, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s6
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s22, s22, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s16, s16, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s17, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s18, s18, 3
+; GFX11-FAKE16-NEXT: s_add_i32 s0, s0, 3
+; GFX11-FAKE16-NEXT: s_add_i32 s2, s2, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s10, s9
+; GFX11-FAKE16-NEXT: s_and_b32 s10, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s11, s19, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s10
+; GFX11-FAKE16-NEXT: s_or_b32 s0, s1, s0
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s3, s2
+; GFX11-FAKE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s6, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s9, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s0, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s1, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s10, 0x300
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 3, v15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 3, v10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 3, v16
+; GFX11-FAKE16-NEXT: s_addk_i32 s7, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s8, 0x300
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 3, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 3, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v17
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 3, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v11, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v21, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v20, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v13, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, v9, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v19, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v14, v6
+; GFX11-FAKE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e64 v7, 0xffff, s4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v6, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v5, 16, v8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v3, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: .LBB99_3: ; %end
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB99_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+; GFX11-FAKE16-NEXT: s_branch .LBB99_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -34430,192 +35470,369 @@ define inreg <16 x half> @bitcast_v16bf16_to_v16f16_scalar(<16 x bfloat> inreg %
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v16bf16_to_v16f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_mov_b32 s7, s19
-; GFX11-NEXT: s_mov_b32 s6, s18
-; GFX11-NEXT: s_mov_b32 s5, s17
-; GFX11-NEXT: s_mov_b32 s4, s16
-; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB103_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB103_4
-; GFX11-NEXT: .LBB103_2: ; %cmp.true
-; GFX11-NEXT: s_and_b32 s8, s0, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s1, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v0
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v1
-; GFX11-NEXT: v_bfe_u32 v7, v4, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s0, s2, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: v_bfe_u32 v9, v5, 16, 1
-; GFX11-NEXT: s_and_b32 s8, s2, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s4, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s8
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v9, v5
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v5
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v6, v7, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v2
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s3, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v3, v3, v4 :: v_dual_add_nc_u32 v4, v6, v7
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: s_lshl_b32 s0, s3, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_bfe_u32 v10, v6, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s4, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v4
-; GFX11-NEXT: v_bfe_u32 v4, v7, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v5, v9 :: v_dual_add_nc_u32 v5, 0x7fff, v10
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: s_and_b32 s0, s5, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v7
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v7
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v9, v10, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: s_lshl_b32 s0, s5, 16
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v9, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v5
-; GFX11-NEXT: v_bfe_u32 v5, v12, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v12
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_bfe_u32 v13, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v12
-; GFX11-NEXT: s_and_b32 s0, s6, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v7, v14, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v14, v15, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s6, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v13, v13, v6
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v6
-; GFX11-NEXT: v_dual_cndmask_b32 v5, v5, v10 :: v_dual_add_nc_u32 v10, v14, v15
-; GFX11-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s7, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v18, v14, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: s_and_b32 s0, s7, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v13, v16, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v15
-; GFX11-NEXT: v_bfe_u32 v20, v17, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
-; GFX11-NEXT: v_add_nc_u32_e32 v15, v18, v14
-; GFX11-NEXT: v_bfe_u32 v13, v12, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v19, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v18, v20, v17
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v10, v10, v16 :: v_dual_add_nc_u32 v15, 0x7fff, v15
-; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v14
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v13, v13, v12
-; GFX11-NEXT: v_bfe_u32 v16, v19, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
-; GFX11-NEXT: v_or_b32_e32 v22, 0x400000, v17
-; GFX11-NEXT: v_cndmask_b32_e32 v14, v15, v21, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
-; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v16, v16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v14
-; GFX11-NEXT: v_cndmask_b32_e32 v17, v18, v22, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v16
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v12, v13, v20, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v15, v16, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v15, 16, v17
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v17, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v13
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_lshl_or_b32 v6, v12, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v5, v5, 16, v10
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v7, v13, 16, v15
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v4, v17, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v3, v9, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v2, v11, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v1, v8, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v0, v0, 16, v14
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB103_3:
-; GFX11-NEXT: s_branch .LBB103_2
-; GFX11-NEXT: .LBB103_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v16bf16_to_v16f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s7, s19
+; GFX11-TRUE16-NEXT: s_mov_b32 s6, s18
+; GFX11-TRUE16-NEXT: s_mov_b32 s5, s17
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, s16
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s8, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX11-TRUE16-NEXT: .LBB103_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s1, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s2, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s2, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s5, 0xffff0000
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v6 :: v_dual_add_nc_u32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v9, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s7, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v6, v2 :: v_dual_add_nc_u32 v2, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s3, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s3, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v18, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v11, 16, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, v10, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, v18, v17
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s4, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, v2, v5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s4, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v3, v6, v7 :: v_dual_add_nc_u32 v4, 0x7fff, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s5, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v7, v12
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v13, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v10, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v11.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v14.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s6, 0xffff0000
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v9.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v16, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v6, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s6, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, v16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v13, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v6
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v7, v7, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v15, v10, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v12
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v16, 16, v7
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, v15, v10
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s7, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v13.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v6, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v15, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v20, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v15
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v12, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, v20, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, 0x400000, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v17, v18, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v16.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v8.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v17
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v7, v7, v20, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v7
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v12, v12, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v10.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v12
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v15.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB103_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB103_2
+; GFX11-TRUE16-NEXT: .LBB103_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v16bf16_to_v16f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_mov_b32 s7, s19
+; GFX11-FAKE16-NEXT: s_mov_b32 s6, s18
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, s17
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, s16
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s8, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX11-FAKE16-NEXT: .LBB103_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s1, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s2, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s2, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s4, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v9, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v7, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s3, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v3, v3, v4 :: v_dual_add_nc_u32 v4, v6, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s3, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s4, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v7, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v5, v9 :: v_dual_add_nc_u32 v5, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s5, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s5, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v9, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v12
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v13, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v12
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s6, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v7, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v14, v15, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s6, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, v13, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v5, v5, v10 :: v_dual_add_nc_u32 v10, v14, v15
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s7, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v18, v14, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s7, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v13, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v15
+; GFX11-FAKE16-NEXT: v_bfe_u32 v20, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, v18, v14
+; GFX11-FAKE16-NEXT: v_bfe_u32 v13, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v19, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, v20, v17
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v10, v10, v16 :: v_dual_add_nc_u32 v15, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v14
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, v13, v12
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v19, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v22, 0x400000, v17
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v14, v15, v21, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, v16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v14
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v17, v18, v22, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v13, v20, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v15, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v17, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v12, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v5, 16, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v13, 16, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v17, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v9, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v11, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v8, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v0, 16, v14
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB103_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB103_2
+; GFX11-FAKE16-NEXT: .LBB103_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -37354,177 +38571,351 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
; GFX9-NEXT: s_branch .LBB107_2
;
-; GFX11-LABEL: bitcast_v32i8_to_v16f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v17, v0
-; GFX11-NEXT: v_dual_mov_b32 v18, v4 :: v_dual_mov_b32 v15, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v14, 8, v1
-; GFX11-NEXT: v_lshlrev_b32_e32 v20, 8, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v19, 8, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v21, 8, v7
-; GFX11-NEXT: v_lshlrev_b32_e32 v9, 8, v9
-; GFX11-NEXT: v_lshlrev_b32_e32 v11, 8, v11
-; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v13
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB107_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v15
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s27, 8
-; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v16
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: v_or_b32_e32 v1, v1, v20
-; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v10
-; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s9
-; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v18
-; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v17
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v8
-; GFX11-NEXT: v_or_b32_e32 v4, v4, v21
-; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v12
-; GFX11-NEXT: v_or_b32_e32 v2, v2, v19
-; GFX11-NEXT: v_or_b32_e32 v5, v5, v11
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: s_and_b32 s11, s28, 0xff
-; GFX11-NEXT: s_lshl_b32 s12, s29, 8
-; GFX11-NEXT: v_or_b32_e32 v6, v6, v9
-; GFX11-NEXT: s_or_b32 s10, s11, s12
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e64 v3, 0xffff, s10
-; GFX11-NEXT: v_or_b32_e32 v7, v7, v13
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v5, v2, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, s6
-; GFX11-NEXT: v_or_b32_e32 v0, v0, v14
-; GFX11-NEXT: v_lshl_or_b32 v6, v6, 16, v22
-; GFX11-NEXT: v_lshl_or_b32 v7, v7, 16, v23
-; GFX11-NEXT: v_mov_b32_e32 v2, s7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v4, v0, 16, v3
-; GFX11-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v3, s8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB107_3
-; GFX11-NEXT: .LBB107_2: ; %cmp.true
-; GFX11-NEXT: s_add_i32 s28, s28, 3
-; GFX11-NEXT: s_lshl_b32 s5, s29, 8
-; GFX11-NEXT: s_and_b32 s4, s28, 0xff
-; GFX11-NEXT: s_add_i32 s24, s24, 3
-; GFX11-NEXT: s_or_b32 s4, s5, s4
-; GFX11-NEXT: s_and_b32 s5, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s25, 8
-; GFX11-NEXT: s_add_i32 s26, s26, 3
-; GFX11-NEXT: s_or_b32 s5, s6, s5
-; GFX11-NEXT: s_and_b32 s6, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s7, s27, 8
-; GFX11-NEXT: s_add_i32 s20, s20, 3
-; GFX11-NEXT: s_or_b32 s6, s7, s6
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_add_i32 s22, s22, 3
-; GFX11-NEXT: s_or_b32 s7, s8, s7
-; GFX11-NEXT: s_and_b32 s8, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s23, 8
-; GFX11-NEXT: s_add_i32 s16, s16, 3
-; GFX11-NEXT: s_or_b32 s8, s9, s8
-; GFX11-NEXT: s_and_b32 s9, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s17, 8
-; GFX11-NEXT: s_add_i32 s18, s18, 3
-; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: s_add_i32 s2, s2, 3
-; GFX11-NEXT: s_or_b32 s9, s10, s9
-; GFX11-NEXT: s_and_b32 s10, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s11, s19, 8
-; GFX11-NEXT: s_and_b32 s0, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s1, 8
-; GFX11-NEXT: s_and_b32 s2, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s3, 8
-; GFX11-NEXT: s_or_b32 s10, s11, s10
-; GFX11-NEXT: s_or_b32 s0, s1, s0
-; GFX11-NEXT: s_or_b32 s1, s3, s2
-; GFX11-NEXT: s_addk_i32 s5, 0x300
-; GFX11-NEXT: s_addk_i32 s6, 0x300
-; GFX11-NEXT: s_addk_i32 s9, 0x300
-; GFX11-NEXT: s_addk_i32 s0, 0x300
-; GFX11-NEXT: s_addk_i32 s1, 0x300
-; GFX11-NEXT: s_addk_i32 s10, 0x300
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 3, v15
-; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s1
-; GFX11-NEXT: s_pack_ll_b32_b16 s1, s9, s10
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v10
-; GFX11-NEXT: s_pack_ll_b32_b16 s3, s5, s6
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v16
-; GFX11-NEXT: s_addk_i32 s7, 0x300
-; GFX11-NEXT: s_addk_i32 s8, 0x300
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v12
-; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 3, v18
-; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v17
-; GFX11-NEXT: s_pack_ll_b32_b16 s2, s7, s8
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 3, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX11-NEXT: v_or_b32_e32 v0, v11, v0
-; GFX11-NEXT: v_or_b32_e32 v2, v21, v2
-; GFX11-NEXT: v_or_b32_e32 v4, v20, v4
-; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX11-NEXT: v_or_b32_e32 v1, v13, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
-; GFX11-NEXT: v_or_b32_e32 v3, v9, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
-; GFX11-NEXT: v_or_b32_e32 v5, v19, v5
-; GFX11-NEXT: v_or_b32_e32 v6, v14, v6
-; GFX11-NEXT: s_addk_i32 s4, 0x300
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x300, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
-; GFX11-NEXT: v_and_b32_e64 v7, 0xffff, s4
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v4, v6, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v5, v5, 16, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v6, v3, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: .LBB107_3: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB107_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX11-NEXT: s_branch .LBB107_2
+; GFX11-TRUE16-LABEL: bitcast_v32i8_to_v16f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v17, v0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v4 :: v_dual_mov_b32 v15, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v14, 8, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v20, 8, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v19, 8, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 8, v7
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v11, 8, v11
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB107_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s17, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s19, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s11, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s12, s27, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s28, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s29, 8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v10
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s9 :: v_dual_and_b32 v1, 0xff, v15
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v17
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v7, v11
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v18
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v1, v20
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v12
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v13
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v3, v21
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v19
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v14
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s7
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v9
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s5
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB107_3
+; GFX11-TRUE16-NEXT: .LBB107_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_add_i32 s28, s28, 3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-TRUE16-NEXT: s_add_i32 s24, s24, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s5, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s25, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s26, s26, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s27, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s20, s20, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s6
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s22, s22, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s16, s16, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s17, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s18, s18, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s0, s0, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s19, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s10
+; GFX11-TRUE16-NEXT: s_or_b32 s0, s1, s0
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s3, s2
+; GFX11-TRUE16-NEXT: s_addk_i32 s9, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s0, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s1, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s10, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v10
+; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s8, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v18
+; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v11, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v13, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v1
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v0.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_and_b32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v9, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v20, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v19, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_add_nc_u32 v3, 0x300, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v3.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_and_b32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v21, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v17
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v1.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_and_b32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v14, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s2
+; GFX11-TRUE16-NEXT: .LBB107_3: ; %end
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB107_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+; GFX11-TRUE16-NEXT: s_branch .LBB107_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v32i8_to_v16f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v17, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, v4 :: v_dual_mov_b32 v15, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v14, 8, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v20, 8, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v19, 8, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v21, 8, v7
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v11, 8, v11
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB107_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s17, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s19, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s27, 8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v16
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v1, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v4, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v2, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v5, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: s_and_b32 s11, s28, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s12, s29, 8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v6, v9
+; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e64 v3, 0xffff, s10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, v7, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v2, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v6, 16, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v7, 16, v23
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, s7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v0, 16, v3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v3, s8
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB107_3
+; GFX11-FAKE16-NEXT: .LBB107_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_add_i32 s28, s28, 3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-FAKE16-NEXT: s_add_i32 s24, s24, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s5, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s25, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s26, s26, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s5
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s27, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s20, s20, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s6
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s22, s22, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s16, s16, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s17, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s18, s18, 3
+; GFX11-FAKE16-NEXT: s_add_i32 s0, s0, 3
+; GFX11-FAKE16-NEXT: s_add_i32 s2, s2, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s10, s9
+; GFX11-FAKE16-NEXT: s_and_b32 s10, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s11, s19, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s10
+; GFX11-FAKE16-NEXT: s_or_b32 s0, s1, s0
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s3, s2
+; GFX11-FAKE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s6, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s9, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s0, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s1, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s10, 0x300
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 3, v15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 3, v10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 3, v16
+; GFX11-FAKE16-NEXT: s_addk_i32 s7, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s8, 0x300
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 3, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 3, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v17
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 3, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v11, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v21, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v20, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v13, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, v9, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v19, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v14, v6
+; GFX11-FAKE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e64 v7, 0xffff, s4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v6, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v5, 16, v8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v3, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: .LBB107_3: ; %end
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB107_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+; GFX11-FAKE16-NEXT: s_branch .LBB107_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -39446,281 +40837,552 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v28, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v16bf16_to_v32i8_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s12, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB109_3
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s27, s19, 24
-; GFX11-NEXT: s_lshr_b32 s46, s19, 16
-; GFX11-NEXT: s_lshr_b32 s40, s19, 8
-; GFX11-NEXT: s_lshr_b32 s42, s18, 16
-; GFX11-NEXT: s_lshr_b32 s41, s18, 8
-; GFX11-NEXT: s_lshr_b32 s23, s17, 24
-; GFX11-NEXT: s_lshr_b32 s45, s17, 16
-; GFX11-NEXT: s_lshr_b32 s26, s17, 8
-; GFX11-NEXT: s_lshr_b32 s29, s16, 16
-; GFX11-NEXT: s_lshr_b32 s28, s16, 8
-; GFX11-NEXT: s_lshr_b32 s15, s3, 24
-; GFX11-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-NEXT: s_lshr_b32 s22, s3, 8
-; GFX11-NEXT: s_lshr_b32 s25, s2, 16
-; GFX11-NEXT: s_lshr_b32 s24, s2, 8
-; GFX11-NEXT: s_lshr_b32 s13, s1, 24
-; GFX11-NEXT: s_lshr_b32 s43, s1, 16
-; GFX11-NEXT: s_lshr_b32 s14, s1, 8
-; GFX11-NEXT: s_lshr_b32 s21, s0, 16
-; GFX11-NEXT: s_lshr_b32 s20, s0, 8
-; GFX11-NEXT: s_lshr_b64 s[10:11], s[18:19], 24
-; GFX11-NEXT: s_lshr_b64 s[8:9], s[16:17], 24
-; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
-; GFX11-NEXT: s_cbranch_vccnz .LBB109_4
-; GFX11-NEXT: .LBB109_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s4, s1, 16
-; GFX11-NEXT: s_and_b32 s1, s1, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
-; GFX11-NEXT: s_and_b32 s4, s0, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_bfe_u32 v10, v3, 16, 1
-; GFX11-NEXT: s_and_b32 s1, s3, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: s_and_b32 s0, s2, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v4, v10, v3
-; GFX11-NEXT: v_bfe_u32 v10, v7, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v35
-; GFX11-NEXT: v_bfe_u32 v9, v8, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v0, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v3, v4, v11 :: v_dual_add_nc_u32 v12, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v10, v7
-; GFX11-NEXT: v_lshl_or_b32 v2, v6, 16, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v9, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v12, v0, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s2, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v1, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s17, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v5
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s17, 16
-; GFX11-NEXT: v_bfe_u32 v8, v4, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v9
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v4
-; GFX11-NEXT: v_bfe_u32 v12, v9, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v10, v14, 16, v7
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v5
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s16, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v13, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_lshl_b32 s0, s16, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s19, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v4, v7, v8 :: v_dual_add_nc_u32 v7, v12, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v13, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v13, v15, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v12
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v13, v13, v15
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v13
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v15
-; GFX11-NEXT: v_bfe_u32 v13, v17, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
-; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v17
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v33
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s19, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v12, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v13, v17
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s18, 16
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_bfe_u32 v18, v13, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s18, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v12, v4, 16, 1
-; GFX11-NEXT: v_bfe_u32 v20, v16, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v7, v15, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v15, v18, v13
-; GFX11-NEXT: v_add_f32_e64 v19, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v18, v20, v16
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v12, v4
-; GFX11-NEXT: v_bfe_u32 v17, v19, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
-; GFX11-NEXT: v_or_b32_e32 v23, 0x400000, v16
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v15, v21, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
-; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v17, v17, v19
-; GFX11-NEXT: v_cndmask_b32_e32 v16, v18, v23, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v17
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v19
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v12, v20, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v18, v22, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v12, v15, v17, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v32
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 24, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v12
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v26, v30, 16, v4
-; GFX11-NEXT: v_lshl_or_b32 v17, v5, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v9, v1, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v1, v3, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v25, v7, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 24, v26
-; GFX11-NEXT: v_lshrrev_b64 v[19:20], 24, v[17:18]
-; GFX11-NEXT: v_lshrrev_b64 v[11:12], 24, v[9:10]
-; GFX11-NEXT: v_lshrrev_b64 v[3:4], 24, v[1:2]
-; GFX11-NEXT: v_lshrrev_b64 v[27:28], 24, v[25:26]
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 8, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 8, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 8, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v17, 8, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v15, 24, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 8, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 8, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 24, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX11-NEXT: s_branch .LBB109_5
-; GFX11-NEXT: .LBB109_3:
-; GFX11-NEXT: ; implicit-def: $sgpr20
-; GFX11-NEXT: ; implicit-def: $sgpr21
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr24
-; GFX11-NEXT: ; implicit-def: $sgpr25
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr22
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr28
-; GFX11-NEXT: ; implicit-def: $sgpr29
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr26
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr23
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr27
-; GFX11-NEXT: s_branch .LBB109_2
-; GFX11-NEXT: .LBB109_4:
-; GFX11-NEXT: v_dual_mov_b32 v24, s18 :: v_dual_mov_b32 v33, s17
-; GFX11-NEXT: v_dual_mov_b32 v32, s19 :: v_dual_mov_b32 v35, s1
-; GFX11-NEXT: v_dual_mov_b32 v30, s46 :: v_dual_mov_b32 v25, s41
-; GFX11-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v31, s27
-; GFX11-NEXT: v_dual_mov_b32 v22, s45 :: v_dual_mov_b32 v29, s40
-; GFX11-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v17, s28
-; GFX11-NEXT: v_dual_mov_b32 v34, s3 :: v_dual_mov_b32 v23, s23
-; GFX11-NEXT: v_dual_mov_b32 v14, s44 :: v_dual_mov_b32 v21, s26
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v9, s24
-; GFX11-NEXT: v_dual_mov_b32 v6, s43 :: v_dual_mov_b32 v15, s15
-; GFX11-NEXT: v_dual_mov_b32 v26, s42 :: v_dual_mov_b32 v13, s22
-; GFX11-NEXT: v_dual_mov_b32 v18, s29 :: v_dual_mov_b32 v1, s20
-; GFX11-NEXT: v_dual_mov_b32 v10, s25 :: v_dual_mov_b32 v7, s13
-; GFX11-NEXT: v_dual_mov_b32 v2, s21 :: v_dual_mov_b32 v5, s14
-; GFX11-NEXT: v_mov_b32_e32 v27, s10
-; GFX11-NEXT: v_mov_b32_e32 v19, s8
-; GFX11-NEXT: v_mov_b32_e32 v11, s6
-; GFX11-NEXT: v_mov_b32_e32 v3, s4
-; GFX11-NEXT: .LBB109_5: ; %end
-; GFX11-NEXT: v_mov_b32_e32 v4, v35
-; GFX11-NEXT: v_mov_b32_e32 v12, v34
-; GFX11-NEXT: v_mov_b32_e32 v20, v33
-; GFX11-NEXT: v_mov_b32_e32 v28, v32
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v16bf16_to_v32i8_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s12, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB109_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_lshr_b32 s27, s19, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s19, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s18, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s23, s17, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s26, s17, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s29, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s28, s16, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s3, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s22, s3, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s25, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s24, s2, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s1, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s1, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s21, s0, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s20, s0, 8
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[10:11], s[18:19], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[8:9], s[16:17], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB109_4
+; GFX11-TRUE16-NEXT: .LBB109_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s1, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s1, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s3, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v3, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s2, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v9, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s18, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v19, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v10, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v11, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v20, v19, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v35.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v6.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, v20, v19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v8, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s2, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v4
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v3, v3, v9 :: v_dual_add_nc_u32 v4, v5, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v34.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v3, v3, v4 :: v_dual_add_nc_u32 v4, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v13, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v14.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v11.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v8, v12
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s16, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v3.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v8.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v16, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v5, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, v16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v7, v7, v12 :: v_dual_add_nc_u32 v12, 0x7fff, v15
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v15, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v22.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v12, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, v15, v5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v13, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, v13, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v12, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v12, v16, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
+; GFX11-TRUE16-NEXT: v_bfe_u32 v15, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, v12, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, v15, v17
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v7, v13, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v16, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v7
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v33.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 24, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v30.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v12
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v13, v20, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v16.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v32.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v13
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v15, v15, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[11:12], 24, v[9:10]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[19:20], 24, v[17:18]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[3:4], 24, v[1:2]
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v24.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 24, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 8, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 24, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 8, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v5.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v17, 8, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 24, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v13, 8, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[27:28], 24, v[25:26]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 8, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 8, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 8, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-TRUE16-NEXT: s_branch .LBB109_5
+; GFX11-TRUE16-NEXT: .LBB109_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr20
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr21
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr24
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr22
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr23
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr27
+; GFX11-TRUE16-NEXT: s_branch .LBB109_2
+; GFX11-TRUE16-NEXT: .LBB109_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, s18 :: v_dual_mov_b32 v33, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s19 :: v_dual_mov_b32 v35, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s46 :: v_dual_mov_b32 v25, s41
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v31, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, s45 :: v_dual_mov_b32 v29, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v17, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s3 :: v_dual_mov_b32 v23, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s44 :: v_dual_mov_b32 v21, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v9, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s43 :: v_dual_mov_b32 v15, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s42 :: v_dual_mov_b32 v13, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, s29 :: v_dual_mov_b32 v1, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s25 :: v_dual_mov_b32 v7, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s21 :: v_dual_mov_b32 v5, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v27, s10
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v19, s8
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v11, s6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-TRUE16-NEXT: .LBB109_5: ; %end
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v35
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v12, v34
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v20, v33
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v28, v32
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v16bf16_to_v32i8_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s12, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB109_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_lshr_b32 s27, s19, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s19, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s18, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s23, s17, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s26, s17, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s29, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s28, s16, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s3, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s22, s3, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s25, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s24, s2, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s1, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s1, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s21, s0, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s20, s0, 8
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[10:11], s[18:19], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[8:9], s[16:17], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB109_4
+; GFX11-FAKE16-NEXT: .LBB109_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s1, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s1, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v3, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s3, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s2, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v4, v10, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v35
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v3, v4, v11 :: v_dual_add_nc_u32 v12, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v10, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v6, 16, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v9, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v12, v0, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s2, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v14, 16, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v13, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s16, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v4, v7, v8 :: v_dual_add_nc_u32 v7, v12, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v13, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v13, v15, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, v13, v15
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v15
+; GFX11-FAKE16-NEXT: v_bfe_u32 v13, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v17
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v33
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v13, v17
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s18, 16
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v18, v13, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v20, v16, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v7, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, v18, v13
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v19, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, v20, v16
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v12, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v17, v19, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v23, 0x400000, v16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v15, v21, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, v17, v19
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v16, v18, v23, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v17
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v19
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v12, v20, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v22, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v15, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v32
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 24, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v30, 16, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v5, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v1, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v7, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 24, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[19:20], 24, v[17:18]
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[11:12], 24, v[9:10]
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[3:4], 24, v[1:2]
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[27:28], 24, v[25:26]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 8, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 8, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 8, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v17, 8, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 24, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 8, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 8, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 24, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 8, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-FAKE16-NEXT: s_branch .LBB109_5
+; GFX11-FAKE16-NEXT: .LBB109_3:
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr20
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr21
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr22
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr27
+; GFX11-FAKE16-NEXT: s_branch .LBB109_2
+; GFX11-FAKE16-NEXT: .LBB109_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s18 :: v_dual_mov_b32 v33, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s19 :: v_dual_mov_b32 v35, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s46 :: v_dual_mov_b32 v25, s41
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v31, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s45 :: v_dual_mov_b32 v29, s40
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v17, s28
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s3 :: v_dual_mov_b32 v23, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s44 :: v_dual_mov_b32 v21, s26
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v9, s24
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s43 :: v_dual_mov_b32 v15, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s42 :: v_dual_mov_b32 v13, s22
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s29 :: v_dual_mov_b32 v1, s20
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s25 :: v_dual_mov_b32 v7, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s21 :: v_dual_mov_b32 v5, s14
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v27, s10
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v19, s8
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v11, s6
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-FAKE16-NEXT: .LBB109_5: ; %end
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v35
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v12, v34
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v20, v33
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v28, v32
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -41235,177 +42897,351 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
; GFX9-NEXT: s_branch .LBB111_2
;
-; GFX11-LABEL: bitcast_v32i8_to_v16bf16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v17, v0
-; GFX11-NEXT: v_dual_mov_b32 v18, v4 :: v_dual_mov_b32 v15, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v14, 8, v1
-; GFX11-NEXT: v_lshlrev_b32_e32 v20, 8, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v19, 8, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v21, 8, v7
-; GFX11-NEXT: v_lshlrev_b32_e32 v9, 8, v9
-; GFX11-NEXT: v_lshlrev_b32_e32 v11, 8, v11
-; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v13
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB111_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v15
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s27, 8
-; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v16
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: v_or_b32_e32 v1, v1, v20
-; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v10
-; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s9
-; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v18
-; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v17
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v8
-; GFX11-NEXT: v_or_b32_e32 v4, v4, v21
-; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v12
-; GFX11-NEXT: v_or_b32_e32 v2, v2, v19
-; GFX11-NEXT: v_or_b32_e32 v5, v5, v11
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: s_and_b32 s11, s28, 0xff
-; GFX11-NEXT: s_lshl_b32 s12, s29, 8
-; GFX11-NEXT: v_or_b32_e32 v6, v6, v9
-; GFX11-NEXT: s_or_b32 s10, s11, s12
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e64 v3, 0xffff, s10
-; GFX11-NEXT: v_or_b32_e32 v7, v7, v13
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v5, v2, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, s6
-; GFX11-NEXT: v_or_b32_e32 v0, v0, v14
-; GFX11-NEXT: v_lshl_or_b32 v6, v6, 16, v22
-; GFX11-NEXT: v_lshl_or_b32 v7, v7, 16, v23
-; GFX11-NEXT: v_mov_b32_e32 v2, s7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v4, v0, 16, v3
-; GFX11-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v3, s8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB111_3
-; GFX11-NEXT: .LBB111_2: ; %cmp.true
-; GFX11-NEXT: s_add_i32 s28, s28, 3
-; GFX11-NEXT: s_lshl_b32 s5, s29, 8
-; GFX11-NEXT: s_and_b32 s4, s28, 0xff
-; GFX11-NEXT: s_add_i32 s24, s24, 3
-; GFX11-NEXT: s_or_b32 s4, s5, s4
-; GFX11-NEXT: s_and_b32 s5, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s25, 8
-; GFX11-NEXT: s_add_i32 s26, s26, 3
-; GFX11-NEXT: s_or_b32 s5, s6, s5
-; GFX11-NEXT: s_and_b32 s6, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s7, s27, 8
-; GFX11-NEXT: s_add_i32 s20, s20, 3
-; GFX11-NEXT: s_or_b32 s6, s7, s6
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_add_i32 s22, s22, 3
-; GFX11-NEXT: s_or_b32 s7, s8, s7
-; GFX11-NEXT: s_and_b32 s8, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s23, 8
-; GFX11-NEXT: s_add_i32 s16, s16, 3
-; GFX11-NEXT: s_or_b32 s8, s9, s8
-; GFX11-NEXT: s_and_b32 s9, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s17, 8
-; GFX11-NEXT: s_add_i32 s18, s18, 3
-; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: s_add_i32 s2, s2, 3
-; GFX11-NEXT: s_or_b32 s9, s10, s9
-; GFX11-NEXT: s_and_b32 s10, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s11, s19, 8
-; GFX11-NEXT: s_and_b32 s0, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s1, 8
-; GFX11-NEXT: s_and_b32 s2, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s3, 8
-; GFX11-NEXT: s_or_b32 s10, s11, s10
-; GFX11-NEXT: s_or_b32 s0, s1, s0
-; GFX11-NEXT: s_or_b32 s1, s3, s2
-; GFX11-NEXT: s_addk_i32 s5, 0x300
-; GFX11-NEXT: s_addk_i32 s6, 0x300
-; GFX11-NEXT: s_addk_i32 s9, 0x300
-; GFX11-NEXT: s_addk_i32 s0, 0x300
-; GFX11-NEXT: s_addk_i32 s1, 0x300
-; GFX11-NEXT: s_addk_i32 s10, 0x300
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 3, v15
-; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s1
-; GFX11-NEXT: s_pack_ll_b32_b16 s1, s9, s10
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v10
-; GFX11-NEXT: s_pack_ll_b32_b16 s3, s5, s6
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v16
-; GFX11-NEXT: s_addk_i32 s7, 0x300
-; GFX11-NEXT: s_addk_i32 s8, 0x300
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v12
-; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 3, v18
-; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v17
-; GFX11-NEXT: s_pack_ll_b32_b16 s2, s7, s8
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 3, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX11-NEXT: v_or_b32_e32 v0, v11, v0
-; GFX11-NEXT: v_or_b32_e32 v2, v21, v2
-; GFX11-NEXT: v_or_b32_e32 v4, v20, v4
-; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX11-NEXT: v_or_b32_e32 v1, v13, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
-; GFX11-NEXT: v_or_b32_e32 v3, v9, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
-; GFX11-NEXT: v_or_b32_e32 v5, v19, v5
-; GFX11-NEXT: v_or_b32_e32 v6, v14, v6
-; GFX11-NEXT: s_addk_i32 s4, 0x300
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x300, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
-; GFX11-NEXT: v_and_b32_e64 v7, 0xffff, s4
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v4, v6, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v5, v5, 16, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v6, v3, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: .LBB111_3: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB111_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX11-NEXT: s_branch .LBB111_2
+; GFX11-TRUE16-LABEL: bitcast_v32i8_to_v16bf16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v17, v0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v4 :: v_dual_mov_b32 v15, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v14, 8, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v20, 8, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v19, 8, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 8, v7
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v11, 8, v11
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB111_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s17, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s19, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s11, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s12, s27, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s28, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s29, 8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v10
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s9 :: v_dual_and_b32 v1, 0xff, v15
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v17
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v7, v11
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v18
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v1, v20
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v12
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v13
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v3, v21
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v19
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v14
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s7
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v9
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s5
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB111_3
+; GFX11-TRUE16-NEXT: .LBB111_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_add_i32 s28, s28, 3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-TRUE16-NEXT: s_add_i32 s24, s24, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s5, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s25, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s26, s26, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s27, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s20, s20, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s6
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s22, s22, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s16, s16, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s17, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s18, s18, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s0, s0, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s19, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s10
+; GFX11-TRUE16-NEXT: s_or_b32 s0, s1, s0
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s3, s2
+; GFX11-TRUE16-NEXT: s_addk_i32 s9, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s0, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s1, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s10, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v10
+; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s8, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v18
+; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v11, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v13, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v1
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v0.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_and_b32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v9, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v20, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v19, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_add_nc_u32 v3, 0x300, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v3.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_and_b32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v21, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v17
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v1.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_and_b32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v14, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s2
+; GFX11-TRUE16-NEXT: .LBB111_3: ; %end
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB111_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+; GFX11-TRUE16-NEXT: s_branch .LBB111_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v32i8_to_v16bf16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v17, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, v4 :: v_dual_mov_b32 v15, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v14, 8, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v20, 8, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v19, 8, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v21, 8, v7
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v11, 8, v11
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB111_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s17, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s19, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s27, 8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v16
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v1, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v4, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v2, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v5, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: s_and_b32 s11, s28, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s12, s29, 8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v6, v9
+; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e64 v3, 0xffff, s10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, v7, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v2, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v6, 16, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v7, 16, v23
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, s7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v0, 16, v3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v3, s8
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB111_3
+; GFX11-FAKE16-NEXT: .LBB111_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_add_i32 s28, s28, 3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-FAKE16-NEXT: s_add_i32 s24, s24, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s5, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s25, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s26, s26, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s5
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s27, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s20, s20, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s6
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s22, s22, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s16, s16, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s17, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s18, s18, 3
+; GFX11-FAKE16-NEXT: s_add_i32 s0, s0, 3
+; GFX11-FAKE16-NEXT: s_add_i32 s2, s2, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s10, s9
+; GFX11-FAKE16-NEXT: s_and_b32 s10, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s11, s19, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s10
+; GFX11-FAKE16-NEXT: s_or_b32 s0, s1, s0
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s3, s2
+; GFX11-FAKE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s6, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s9, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s0, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s1, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s10, 0x300
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 3, v15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 3, v10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 3, v16
+; GFX11-FAKE16-NEXT: s_addk_i32 s7, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s8, 0x300
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 3, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 3, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v17
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 3, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v11, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v21, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v20, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v13, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, v9, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v19, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v14, v6
+; GFX11-FAKE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e64 v7, 0xffff, s4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v6, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v5, 16, v8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v3, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: .LBB111_3: ; %end
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB111_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+; GFX11-FAKE16-NEXT: s_branch .LBB111_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll
index b622e6e..9041f64 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll
@@ -19562,212 +19562,421 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; GFX9-NEXT: s_branch .LBB51_2
;
-; GFX11-LABEL: bitcast_v40i8_to_v20i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
-; GFX11-NEXT: v_dual_mov_b32 v25, v14 :: v_dual_mov_b32 v28, v12
-; GFX11-NEXT: v_dual_mov_b32 v27, v10 :: v_dual_mov_b32 v26, v8
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v0
-; GFX11-NEXT: v_dual_mov_b32 v30, v4 :: v_dual_mov_b32 v29, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v22, 8, v1
-; GFX11-NEXT: v_lshlrev_b32_e32 v32, 8, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v31, 8, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v34, 8, v7
-; GFX11-NEXT: v_lshlrev_b32_e32 v33, 8, v9
-; GFX11-NEXT: v_lshlrev_b32_e32 v36, 8, v11
-; GFX11-NEXT: v_lshlrev_b32_e32 v35, 8, v13
-; GFX11-NEXT: v_lshlrev_b32_e32 v37, 8, v15
-; GFX11-NEXT: v_lshlrev_b32_e32 v17, 8, v17
-; GFX11-NEXT: v_lshlrev_b32_e32 v19, 8, v19
-; GFX11-NEXT: v_lshlrev_b32_e32 v21, 8, v21
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB51_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v23
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s27, 8
-; GFX11-NEXT: s_and_b32 s11, s28, 0xff
-; GFX11-NEXT: s_lshl_b32 s12, s29, 8
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: s_or_b32 s10, s11, s12
-; GFX11-NEXT: v_or_b32_e32 v0, v0, v22
-; GFX11-NEXT: v_and_b32_e64 v2, 0xffff, s10
-; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v30
-; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v24
-; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s9
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v28
-; GFX11-NEXT: v_lshl_or_b32 v4, v0, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v26
-; GFX11-NEXT: v_or_b32_e32 v2, v3, v31
-; GFX11-NEXT: v_or_b32_e32 v3, v5, v34
-; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v29
-; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v27
-; GFX11-NEXT: v_or_b32_e32 v0, v0, v33
-; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v25
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v8, 0xff, v18
-; GFX11-NEXT: v_or_b32_e32 v9, v6, v35
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v16
-; GFX11-NEXT: v_or_b32_e32 v5, v5, v36
-; GFX11-NEXT: v_or_b32_e32 v7, v7, v37
-; GFX11-NEXT: v_and_b32_e32 v10, 0xff, v20
-; GFX11-NEXT: v_or_b32_e32 v8, v8, v19
-; GFX11-NEXT: v_or_b32_e32 v12, v6, v17
-; GFX11-NEXT: v_lshl_or_b32 v6, v0, 16, v3
-; GFX11-NEXT: v_mov_b32_e32 v0, s5
-; GFX11-NEXT: v_or_b32_e32 v1, v1, v32
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v7
-; GFX11-NEXT: v_or_b32_e32 v10, v10, v21
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v7, v9, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v8, v12, 16, v13
-; GFX11-NEXT: v_mov_b32_e32 v3, s8
-; GFX11-NEXT: v_lshl_or_b32 v9, v10, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v5, v2, 16, v1
-; GFX11-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_mov_b32 v2, s7
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB51_3
-; GFX11-NEXT: .LBB51_2: ; %cmp.true
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 3, v27
-; GFX11-NEXT: s_add_i32 s28, s28, 3
-; GFX11-NEXT: s_lshl_b32 s5, s29, 8
-; GFX11-NEXT: s_and_b32 s4, s28, 0xff
-; GFX11-NEXT: s_add_i32 s24, s24, 3
-; GFX11-NEXT: s_or_b32 s4, s5, s4
-; GFX11-NEXT: s_and_b32 s5, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s25, 8
-; GFX11-NEXT: s_add_i32 s26, s26, 3
-; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-NEXT: s_or_b32 s5, s6, s5
-; GFX11-NEXT: s_and_b32 s6, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s7, s27, 8
-; GFX11-NEXT: s_add_i32 s20, s20, 3
-; GFX11-NEXT: s_or_b32 s6, s7, s6
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_add_i32 s22, s22, 3
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 3, v28
-; GFX11-NEXT: v_or_b32_e32 v4, v36, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 3, v26
-; GFX11-NEXT: s_or_b32 s7, s8, s7
-; GFX11-NEXT: s_and_b32 s8, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s23, 8
-; GFX11-NEXT: s_add_i32 s16, s16, 3
-; GFX11-NEXT: s_or_b32 s8, s9, s8
-; GFX11-NEXT: s_and_b32 s9, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s17, 8
-; GFX11-NEXT: s_add_i32 s18, s18, 3
-; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: s_add_i32 s2, s2, 3
-; GFX11-NEXT: s_or_b32 s9, s10, s9
-; GFX11-NEXT: s_and_b32 s10, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s11, s19, 8
-; GFX11-NEXT: s_and_b32 s0, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s1, 8
-; GFX11-NEXT: s_and_b32 s2, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s3, 8
-; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x300, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v7
-; GFX11-NEXT: s_or_b32 s10, s11, s10
-; GFX11-NEXT: s_or_b32 s0, s1, s0
-; GFX11-NEXT: s_or_b32 s1, s3, s2
-; GFX11-NEXT: s_addk_i32 s5, 0x300
-; GFX11-NEXT: s_addk_i32 s6, 0x300
-; GFX11-NEXT: s_addk_i32 s9, 0x300
-; GFX11-NEXT: s_addk_i32 s10, 0x300
-; GFX11-NEXT: s_addk_i32 s0, 0x300
-; GFX11-NEXT: s_addk_i32 s1, 0x300
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 3, v29
-; GFX11-NEXT: v_or_b32_e32 v5, v35, v5
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX11-NEXT: v_or_b32_e32 v4, v33, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 3, v23
-; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s1
-; GFX11-NEXT: s_pack_ll_b32_b16 s1, s9, s10
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v18
-; GFX11-NEXT: s_pack_ll_b32_b16 s3, s5, s6
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v25
-; GFX11-NEXT: s_addk_i32 s7, 0x300
-; GFX11-NEXT: s_addk_i32 s8, 0x300
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v20
-; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x300, v5
-; GFX11-NEXT: v_or_b32_e32 v5, v34, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v30
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x300, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v10
-; GFX11-NEXT: s_pack_ll_b32_b16 s2, s7, s8
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 3, v16
-; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX11-NEXT: v_or_b32_e32 v0, v19, v0
-; GFX11-NEXT: v_or_b32_e32 v2, v37, v2
-; GFX11-NEXT: v_or_b32_e32 v7, v32, v7
-; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX11-NEXT: v_or_b32_e32 v4, v22, v4
-; GFX11-NEXT: s_addk_i32 s4, 0x300
-; GFX11-NEXT: v_or_b32_e32 v1, v21, v1
-; GFX11-NEXT: v_or_b32_e32 v3, v17, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x300, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x300, v7
-; GFX11-NEXT: v_or_b32_e32 v6, v31, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
-; GFX11-NEXT: v_and_b32_e64 v10, 0xffff, s4
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v4, v4, 16, v10
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v5, v6, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v6, v11, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v7, v8, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v8, v3, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v9, v1, 16, v0
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: .LBB51_3: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB51_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-NEXT: s_branch .LBB51_2
+; GFX11-TRUE16-LABEL: bitcast_v40i8_to_v20i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v14 :: v_dual_mov_b32 v28, v12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v10 :: v_dual_mov_b32 v26, v8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v4 :: v_dual_mov_b32 v29, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 8, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v32, 8, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v31, 8, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v34, 8, v7
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v33, 8, v9
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v36, 8, v11
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v35, 8, v13
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v37, 8, v15
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v17, 8, v17
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v19, 8, v19
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 8, v21
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s21, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s23, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s27, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s28, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s29, 8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s9 :: v_dual_and_b32 v1, 0xff, v30
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v29
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xff, v18
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v23
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v31
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v26
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v2, v32
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v9, v19
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v27
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v1, v36
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v20
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v21
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v22
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v24
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v0, v34
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v28
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v35
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v33
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v25
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v3, v37
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v17
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s7
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB51_3
+; GFX11-TRUE16-NEXT: .LBB51_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_add_i32 s28, s28, 3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-TRUE16-NEXT: s_add_i32 s24, s24, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s5, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s25, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s26, s26, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s0, s0, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s27, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, s1, s0
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s3, s2
+; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s0, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s1, 0x300
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v25
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v27
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v26
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v29
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-TRUE16-NEXT: s_add_i32 s20, s20, 3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v37, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v17, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s22, s22, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x300, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v36, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v24
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s16, s16, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v30
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s17, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s18, s18, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s19, 8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v33, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xff, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s10
+; GFX11-TRUE16-NEXT: s_addk_i32 s9, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s10, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v31, v10
+; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v20
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v18
+; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s8, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v34, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v19, v0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v21, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v23
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v32, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v28
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v11.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v10.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v22, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v35, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v3.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, s1
+; GFX11-TRUE16-NEXT: .LBB51_3: ; %end
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB51_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+; GFX11-TRUE16-NEXT: s_branch .LBB51_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v40i8_to_v20i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, v14 :: v_dual_mov_b32 v28, v12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, v10 :: v_dual_mov_b32 v26, v8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, v4 :: v_dual_mov_b32 v29, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v22, 8, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v32, 8, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v31, 8, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v34, 8, v7
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v33, 8, v9
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v36, 8, v11
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v35, 8, v13
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v37, 8, v15
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v17, 8, v17
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v19, 8, v19
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v21, 8, v21
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB51_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s17, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s19, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v23
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s27, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s11, s28, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s12, s29, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e64 v2, 0xffff, s10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v30
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v24
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v0, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v26
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v3, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, v5, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v27
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xff, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, v6, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v5, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, v7, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xff, v20
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, v8, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, v6, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v0, 16, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, s5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v1, v32
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, v10, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v9, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v12, 16, v13
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, s8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v10, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v2, 16, v1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_mov_b32 v2, s7
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB51_3
+; GFX11-FAKE16-NEXT: .LBB51_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 3, v27
+; GFX11-FAKE16-NEXT: s_add_i32 s28, s28, 3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-FAKE16-NEXT: s_add_i32 s24, s24, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s5, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s25, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s26, s26, 3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s5
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s27, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s20, s20, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s6
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s22, s22, 3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 3, v28
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v36, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 3, v26
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s16, s16, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s17, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s18, s18, 3
+; GFX11-FAKE16-NEXT: s_add_i32 s0, s0, 3
+; GFX11-FAKE16-NEXT: s_add_i32 s2, s2, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s10, s9
+; GFX11-FAKE16-NEXT: s_and_b32 s10, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s11, s19, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v7
+; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s10
+; GFX11-FAKE16-NEXT: s_or_b32 s0, s1, s0
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s3, s2
+; GFX11-FAKE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s6, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s9, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s10, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s0, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s1, 0x300
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 3, v29
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v35, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v33, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 3, v23
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 3, v18
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 3, v25
+; GFX11-FAKE16-NEXT: s_addk_i32 s7, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s8, 0x300
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 3, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x300, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v34, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v30
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 3, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v19, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v37, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, v32, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v22, v4
+; GFX11-FAKE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v21, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, v17, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v31, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e64 v10, 0xffff, s4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v4, 16, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v6, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v11, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v8, 16, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v3, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: .LBB51_3: ; %end
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB51_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+; GFX11-FAKE16-NEXT: s_branch .LBB51_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -25600,212 +25809,421 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; GFX9-NEXT: s_branch .LBB63_2
;
-; GFX11-LABEL: bitcast_v40i8_to_v20f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
-; GFX11-NEXT: v_dual_mov_b32 v25, v14 :: v_dual_mov_b32 v28, v12
-; GFX11-NEXT: v_dual_mov_b32 v27, v10 :: v_dual_mov_b32 v26, v8
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v0
-; GFX11-NEXT: v_dual_mov_b32 v30, v4 :: v_dual_mov_b32 v29, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v22, 8, v1
-; GFX11-NEXT: v_lshlrev_b32_e32 v32, 8, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v31, 8, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v34, 8, v7
-; GFX11-NEXT: v_lshlrev_b32_e32 v33, 8, v9
-; GFX11-NEXT: v_lshlrev_b32_e32 v36, 8, v11
-; GFX11-NEXT: v_lshlrev_b32_e32 v35, 8, v13
-; GFX11-NEXT: v_lshlrev_b32_e32 v37, 8, v15
-; GFX11-NEXT: v_lshlrev_b32_e32 v17, 8, v17
-; GFX11-NEXT: v_lshlrev_b32_e32 v19, 8, v19
-; GFX11-NEXT: v_lshlrev_b32_e32 v21, 8, v21
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB63_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v23
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s27, 8
-; GFX11-NEXT: s_and_b32 s11, s28, 0xff
-; GFX11-NEXT: s_lshl_b32 s12, s29, 8
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: s_or_b32 s10, s11, s12
-; GFX11-NEXT: v_or_b32_e32 v0, v0, v22
-; GFX11-NEXT: v_and_b32_e64 v2, 0xffff, s10
-; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v30
-; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v24
-; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s9
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v28
-; GFX11-NEXT: v_lshl_or_b32 v4, v0, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v26
-; GFX11-NEXT: v_or_b32_e32 v2, v3, v31
-; GFX11-NEXT: v_or_b32_e32 v3, v5, v34
-; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v29
-; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v27
-; GFX11-NEXT: v_or_b32_e32 v0, v0, v33
-; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v25
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v8, 0xff, v18
-; GFX11-NEXT: v_or_b32_e32 v9, v6, v35
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v16
-; GFX11-NEXT: v_or_b32_e32 v5, v5, v36
-; GFX11-NEXT: v_or_b32_e32 v7, v7, v37
-; GFX11-NEXT: v_and_b32_e32 v10, 0xff, v20
-; GFX11-NEXT: v_or_b32_e32 v8, v8, v19
-; GFX11-NEXT: v_or_b32_e32 v12, v6, v17
-; GFX11-NEXT: v_lshl_or_b32 v6, v0, 16, v3
-; GFX11-NEXT: v_mov_b32_e32 v0, s5
-; GFX11-NEXT: v_or_b32_e32 v1, v1, v32
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v7
-; GFX11-NEXT: v_or_b32_e32 v10, v10, v21
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v7, v9, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v8, v12, 16, v13
-; GFX11-NEXT: v_mov_b32_e32 v3, s8
-; GFX11-NEXT: v_lshl_or_b32 v9, v10, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v5, v2, 16, v1
-; GFX11-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_mov_b32 v2, s7
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB63_3
-; GFX11-NEXT: .LBB63_2: ; %cmp.true
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 3, v27
-; GFX11-NEXT: s_add_i32 s28, s28, 3
-; GFX11-NEXT: s_lshl_b32 s5, s29, 8
-; GFX11-NEXT: s_and_b32 s4, s28, 0xff
-; GFX11-NEXT: s_add_i32 s24, s24, 3
-; GFX11-NEXT: s_or_b32 s4, s5, s4
-; GFX11-NEXT: s_and_b32 s5, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s25, 8
-; GFX11-NEXT: s_add_i32 s26, s26, 3
-; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-NEXT: s_or_b32 s5, s6, s5
-; GFX11-NEXT: s_and_b32 s6, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s7, s27, 8
-; GFX11-NEXT: s_add_i32 s20, s20, 3
-; GFX11-NEXT: s_or_b32 s6, s7, s6
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_add_i32 s22, s22, 3
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 3, v28
-; GFX11-NEXT: v_or_b32_e32 v4, v36, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 3, v26
-; GFX11-NEXT: s_or_b32 s7, s8, s7
-; GFX11-NEXT: s_and_b32 s8, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s23, 8
-; GFX11-NEXT: s_add_i32 s16, s16, 3
-; GFX11-NEXT: s_or_b32 s8, s9, s8
-; GFX11-NEXT: s_and_b32 s9, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s17, 8
-; GFX11-NEXT: s_add_i32 s18, s18, 3
-; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: s_add_i32 s2, s2, 3
-; GFX11-NEXT: s_or_b32 s9, s10, s9
-; GFX11-NEXT: s_and_b32 s10, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s11, s19, 8
-; GFX11-NEXT: s_and_b32 s0, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s1, 8
-; GFX11-NEXT: s_and_b32 s2, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s3, 8
-; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x300, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v7
-; GFX11-NEXT: s_or_b32 s10, s11, s10
-; GFX11-NEXT: s_or_b32 s0, s1, s0
-; GFX11-NEXT: s_or_b32 s1, s3, s2
-; GFX11-NEXT: s_addk_i32 s5, 0x300
-; GFX11-NEXT: s_addk_i32 s6, 0x300
-; GFX11-NEXT: s_addk_i32 s9, 0x300
-; GFX11-NEXT: s_addk_i32 s10, 0x300
-; GFX11-NEXT: s_addk_i32 s0, 0x300
-; GFX11-NEXT: s_addk_i32 s1, 0x300
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 3, v29
-; GFX11-NEXT: v_or_b32_e32 v5, v35, v5
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX11-NEXT: v_or_b32_e32 v4, v33, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 3, v23
-; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s1
-; GFX11-NEXT: s_pack_ll_b32_b16 s1, s9, s10
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v18
-; GFX11-NEXT: s_pack_ll_b32_b16 s3, s5, s6
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v25
-; GFX11-NEXT: s_addk_i32 s7, 0x300
-; GFX11-NEXT: s_addk_i32 s8, 0x300
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v20
-; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x300, v5
-; GFX11-NEXT: v_or_b32_e32 v5, v34, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v30
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x300, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v10
-; GFX11-NEXT: s_pack_ll_b32_b16 s2, s7, s8
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 3, v16
-; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX11-NEXT: v_or_b32_e32 v0, v19, v0
-; GFX11-NEXT: v_or_b32_e32 v2, v37, v2
-; GFX11-NEXT: v_or_b32_e32 v7, v32, v7
-; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX11-NEXT: v_or_b32_e32 v4, v22, v4
-; GFX11-NEXT: s_addk_i32 s4, 0x300
-; GFX11-NEXT: v_or_b32_e32 v1, v21, v1
-; GFX11-NEXT: v_or_b32_e32 v3, v17, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x300, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x300, v7
-; GFX11-NEXT: v_or_b32_e32 v6, v31, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
-; GFX11-NEXT: v_and_b32_e64 v10, 0xffff, s4
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v4, v4, 16, v10
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v5, v6, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v6, v11, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v7, v8, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v8, v3, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v9, v1, 16, v0
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: .LBB63_3: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB63_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-NEXT: s_branch .LBB63_2
+; GFX11-TRUE16-LABEL: bitcast_v40i8_to_v20f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v14 :: v_dual_mov_b32 v28, v12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v10 :: v_dual_mov_b32 v26, v8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v4 :: v_dual_mov_b32 v29, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 8, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v32, 8, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v31, 8, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v34, 8, v7
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v33, 8, v9
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v36, 8, v11
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v35, 8, v13
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v37, 8, v15
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v17, 8, v17
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v19, 8, v19
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 8, v21
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB63_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s21, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s23, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s27, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s28, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s29, 8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s9 :: v_dual_and_b32 v1, 0xff, v30
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v29
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xff, v18
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v23
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v31
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v26
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v2, v32
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v9, v19
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v27
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v1, v36
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v20
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v21
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v22
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v24
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v0, v34
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v28
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v35
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v33
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v25
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v3, v37
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v17
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s7
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB63_3
+; GFX11-TRUE16-NEXT: .LBB63_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_add_i32 s28, s28, 3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-TRUE16-NEXT: s_add_i32 s24, s24, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s5, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s25, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s26, s26, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s0, s0, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s27, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, s1, s0
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s3, s2
+; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s0, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s1, 0x300
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v25
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v27
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v26
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v29
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-TRUE16-NEXT: s_add_i32 s20, s20, 3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v37, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v17, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s22, s22, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x300, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v36, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v24
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s16, s16, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v30
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s17, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s18, s18, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s19, 8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v33, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xff, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s10
+; GFX11-TRUE16-NEXT: s_addk_i32 s9, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s10, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v31, v10
+; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v20
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v18
+; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s8, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v34, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v19, v0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v21, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v23
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v32, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v28
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v11.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v10.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v22, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v35, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v3.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, s1
+; GFX11-TRUE16-NEXT: .LBB63_3: ; %end
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB63_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+; GFX11-TRUE16-NEXT: s_branch .LBB63_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v40i8_to_v20f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, v14 :: v_dual_mov_b32 v28, v12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, v10 :: v_dual_mov_b32 v26, v8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, v4 :: v_dual_mov_b32 v29, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v22, 8, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v32, 8, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v31, 8, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v34, 8, v7
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v33, 8, v9
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v36, 8, v11
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v35, 8, v13
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v37, 8, v15
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v17, 8, v17
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v19, 8, v19
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v21, 8, v21
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB63_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s17, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s19, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v23
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s27, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s11, s28, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s12, s29, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e64 v2, 0xffff, s10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v30
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v24
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v0, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v26
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v3, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, v5, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v27
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xff, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, v6, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v5, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, v7, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xff, v20
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, v8, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, v6, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v0, 16, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, s5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v1, v32
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, v10, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v9, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v12, 16, v13
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, s8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v10, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v2, 16, v1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_mov_b32 v2, s7
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB63_3
+; GFX11-FAKE16-NEXT: .LBB63_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 3, v27
+; GFX11-FAKE16-NEXT: s_add_i32 s28, s28, 3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-FAKE16-NEXT: s_add_i32 s24, s24, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s5, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s25, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s26, s26, 3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s5
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s27, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s20, s20, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s6
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s22, s22, 3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 3, v28
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v36, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 3, v26
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s16, s16, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s17, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s18, s18, 3
+; GFX11-FAKE16-NEXT: s_add_i32 s0, s0, 3
+; GFX11-FAKE16-NEXT: s_add_i32 s2, s2, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s10, s9
+; GFX11-FAKE16-NEXT: s_and_b32 s10, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s11, s19, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v7
+; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s10
+; GFX11-FAKE16-NEXT: s_or_b32 s0, s1, s0
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s3, s2
+; GFX11-FAKE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s6, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s9, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s10, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s0, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s1, 0x300
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 3, v29
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v35, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v33, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 3, v23
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 3, v18
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 3, v25
+; GFX11-FAKE16-NEXT: s_addk_i32 s7, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s8, 0x300
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 3, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x300, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v34, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v30
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 3, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v19, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v37, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, v32, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v22, v4
+; GFX11-FAKE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v21, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, v17, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v31, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e64 v10, 0xffff, s4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v4, 16, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v6, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v11, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v8, 16, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v3, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: .LBB63_3: ; %end
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB63_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+; GFX11-FAKE16-NEXT: s_branch .LBB63_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.32bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.32bit.ll
index e6c7b1a..73b57a5 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.32bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.32bit.ll
@@ -1482,46 +1482,87 @@ define inreg i32 @bitcast_v2bf16_to_i32_scalar(<2 x bfloat> inreg %a, i32 inreg
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v2bf16_to_i32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s1, 0
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
-; GFX11-NEXT: .LBB15_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v3, v5 :: v_dual_and_b32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
-; GFX11-NEXT: .LBB15_4:
-; GFX11-NEXT: v_mov_b32_e32 v0, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v2bf16_to_i32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX11-TRUE16-NEXT: .LBB15_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB15_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB15_2
+; GFX11-TRUE16-NEXT: .LBB15_4:
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v2bf16_to_i32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX11-FAKE16-NEXT: .LBB15_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v3, v5 :: v_dual_and_b32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB15_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB15_2
+; GFX11-FAKE16-NEXT: .LBB15_4:
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3720,46 +3761,87 @@ define inreg float @bitcast_v2bf16_to_f32_scalar(<2 x bfloat> inreg %a, i32 inre
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v2bf16_to_f32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s1, 0
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
-; GFX11-NEXT: .LBB35_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v3, v5 :: v_dual_and_b32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
-; GFX11-NEXT: .LBB35_4:
-; GFX11-NEXT: v_mov_b32_e32 v0, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v2bf16_to_f32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX11-TRUE16-NEXT: .LBB35_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB35_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB35_2
+; GFX11-TRUE16-NEXT: .LBB35_4:
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v2bf16_to_f32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX11-FAKE16-NEXT: .LBB35_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v3, v5 :: v_dual_and_b32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB35_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB35_2
+; GFX11-FAKE16-NEXT: .LBB35_4:
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5424,27 +5506,24 @@ define <2 x i16> @bitcast_v2bf16_to_v2i16(<2 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB50_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 0x40c00000, v1 :: v_dual_lshlrev_b32 v0, 16, v0
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 0x40c00000, v1 :: v_dual_add_f32 v0, 0x40c00000, v0
; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v1.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v3, v5, vcc_lo
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.h
; GFX11-TRUE16-NEXT: .LBB50_2: ; %end
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -5592,44 +5671,81 @@ define inreg <2 x i16> @bitcast_v2bf16_to_v2i16_scalar(<2 x bfloat> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v2bf16_to_v2i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s1, 0
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB51_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB51_4
-; GFX11-NEXT: .LBB51_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
-; GFX11-NEXT: v_and_or_b32 v0, 0xffff0000, v1, v0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB51_3:
-; GFX11-NEXT: s_branch .LBB51_2
-; GFX11-NEXT: .LBB51_4:
-; GFX11-NEXT: v_mov_b32_e32 v0, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v2bf16_to_v2i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB51_4
+; GFX11-TRUE16-NEXT: .LBB51_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v2, v2, v4 :: v_dual_add_nc_u32 v3, v3, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v3, v5, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.h
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB51_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB51_2
+; GFX11-TRUE16-NEXT: .LBB51_4:
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v2bf16_to_v2i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB51_4
+; GFX11-FAKE16-NEXT: .LBB51_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_or_b32 v0, 0xffff0000, v1, v0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB51_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB51_2
+; GFX11-FAKE16-NEXT: .LBB51_4:
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7223,46 +7339,87 @@ define inreg <2 x half> @bitcast_v2bf16_to_v2f16_scalar(<2 x bfloat> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v2bf16_to_v2f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s1, 0
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB63_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB63_4
-; GFX11-NEXT: .LBB63_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v3, v5 :: v_dual_and_b32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB63_3:
-; GFX11-NEXT: s_branch .LBB63_2
-; GFX11-NEXT: .LBB63_4:
-; GFX11-NEXT: v_mov_b32_e32 v0, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v2bf16_to_v2f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB63_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB63_4
+; GFX11-TRUE16-NEXT: .LBB63_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB63_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB63_2
+; GFX11-TRUE16-NEXT: .LBB63_4:
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v2bf16_to_v2f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB63_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB63_4
+; GFX11-FAKE16-NEXT: .LBB63_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v3, v5 :: v_dual_and_b32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB63_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB63_2
+; GFX11-FAKE16-NEXT: .LBB63_4:
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8653,46 +8810,87 @@ define inreg <1 x i32> @bitcast_v2bf16_to_v1i32_scalar(<2 x bfloat> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v2bf16_to_v1i32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s1, 0
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB73_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB73_4
-; GFX11-NEXT: .LBB73_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v3, v5 :: v_dual_and_b32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB73_3:
-; GFX11-NEXT: s_branch .LBB73_2
-; GFX11-NEXT: .LBB73_4:
-; GFX11-NEXT: v_mov_b32_e32 v0, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v2bf16_to_v1i32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB73_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB73_4
+; GFX11-TRUE16-NEXT: .LBB73_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB73_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB73_2
+; GFX11-TRUE16-NEXT: .LBB73_4:
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v2bf16_to_v1i32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB73_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB73_4
+; GFX11-FAKE16-NEXT: .LBB73_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v3, v5 :: v_dual_and_b32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB73_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB73_2
+; GFX11-FAKE16-NEXT: .LBB73_4:
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9258,57 +9456,109 @@ define inreg <4 x i8> @bitcast_v2bf16_to_v4i8_scalar(<2 x bfloat> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v2bf16_to_v4i8_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s1, 0
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB77_3
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s2, s0, 24
-; GFX11-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-NEXT: s_lshr_b32 s3, s0, 8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB77_4
-; GFX11-NEXT: .LBB77_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v1, v2, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 24, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB77_3:
-; GFX11-NEXT: ; implicit-def: $sgpr3
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr2
-; GFX11-NEXT: s_branch .LBB77_2
-; GFX11-NEXT: .LBB77_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s2
-; GFX11-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v1, s3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v2bf16_to_v4i8_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB77_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_lshr_b32 s2, s0, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s3, s0, 8
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB77_4
+; GFX11-TRUE16-NEXT: .LBB77_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 24, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB77_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr3
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr2
+; GFX11-TRUE16-NEXT: s_branch .LBB77_2
+; GFX11-TRUE16-NEXT: .LBB77_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v1, s3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v2bf16_to_v4i8_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB77_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_lshr_b32 s2, s0, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s3, s0, 8
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB77_4
+; GFX11-FAKE16-NEXT: .LBB77_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 24, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB77_3:
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr3
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr2
+; GFX11-FAKE16-NEXT: s_branch .LBB77_2
+; GFX11-FAKE16-NEXT: .LBB77_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v1, s3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll
index acc0247..d5d2d4aa 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll
@@ -374,59 +374,112 @@ define inreg <3 x half> @bitcast_v3bf16_to_v3f16_scalar(<3 x bfloat> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v3bf16_to_v3f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB1_4
-; GFX11-NEXT: .LBB1_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s2, s0, 16
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v5, v8 :: v_dual_and_b32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v1, 0x7fc0, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v0, v2, 16, v0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_3:
-; GFX11-NEXT: s_branch .LBB1_2
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v3bf16_to_v3f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB1_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-TRUE16-NEXT: .LBB1_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, 0x7fc0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v5, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v4.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v3.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB1_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB1_2
+; GFX11-TRUE16-NEXT: .LBB1_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v3bf16_to_v3f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB1_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-FAKE16-NEXT: .LBB1_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s0, 16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v5, v8 :: v_dual_and_b32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, 0x7fc0, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v2, 16, v0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB1_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB1_2
+; GFX11-FAKE16-NEXT: .LBB1_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -803,38 +856,36 @@ define <3 x i16> @bitcast_v3bf16_to_v3i16(<3 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, 0
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v0.l
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v3, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v0, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v1, 16, 1
-; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
-; GFX11-TRUE16-NEXT: v_add3_u32 v4, v4, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v1, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v7, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v5, v8, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v4, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, 0x7fc0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v1.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v0, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, 0x7fc0, 16, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v3.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v5, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v6, v7, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.h
; GFX11-TRUE16-NEXT: .LBB4_2: ; %end
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -1025,56 +1076,105 @@ define inreg <3 x i16> @bitcast_v3bf16_to_v3i16_scalar(<3 x bfloat> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v3bf16_to_v3i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB5_4
-; GFX11-NEXT: .LBB5_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s2, s0, 16
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v5, v8, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v1, 0x7fc0, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_and_or_b32 v0, 0xffff0000, v2, v0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_3:
-; GFX11-NEXT: s_branch .LBB5_2
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v3bf16_to_v3i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB5_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-TRUE16-NEXT: .LBB5_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s0, 16
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s0, 0, s0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v3, v3, v7 :: v_dual_add_nc_u32 v4, v4, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, 0x7fc0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v1.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v5, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v4.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB5_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB5_2
+; GFX11-TRUE16-NEXT: .LBB5_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v3bf16_to_v3i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB5_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-FAKE16-NEXT: .LBB5_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s0, 16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v5, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, 0x7fc0, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v0, 0xffff0000, v2, v0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB5_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB5_2
+; GFX11-FAKE16-NEXT: .LBB5_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
index bff054f..ee23420 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
@@ -7351,360 +7351,696 @@ define inreg <16 x i32> @bitcast_v32bf16_to_v16i32_scalar(<32 x bfloat> inreg %a
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v32bf16_to_v16i32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_mov_b32 s15, s3
-; GFX11-NEXT: s_mov_b32 s14, s2
-; GFX11-NEXT: s_mov_b32 s13, s1
-; GFX11-NEXT: s_mov_b32 s12, s0
-; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
-; GFX11-NEXT: .LBB23_2: ; %cmp.true
-; GFX11-NEXT: s_and_b32 s1, s27, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s0, s27, 16
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s26, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s2, s26, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_bfe_u32 v9, v3, 16, 1
-; GFX11-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_and_b32 s1, s25, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s3, s25, 16
-; GFX11-NEXT: s_and_b32 s0, s24, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s24, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v9, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v7 :: v_dual_add_nc_u32 v2, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
-; GFX11-NEXT: v_bfe_u32 v5, v6, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v7, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v5, v6
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v8
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s23, 16
-; GFX11-NEXT: v_lshl_or_b32 v14, v0, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s23, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v8, v7
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: s_lshl_b32 s1, s22, 16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s22, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v8
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v13, v0, 16, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s21, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v8, v7
-; GFX11-NEXT: s_lshl_b32 s1, s21, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v12, v0, 16, v1
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s20, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s20, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v8
-; GFX11-NEXT: v_lshl_or_b32 v11, v0, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v10, v0, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v8, v7
-; GFX11-NEXT: s_lshl_b32 s0, s19, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s19, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v16, v4, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s18, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_bfe_u32 v6, v8, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v16, v4
-; GFX11-NEXT: v_lshl_or_b32 v9, v1, 16, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v6, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v4
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s1, s18, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s0, s17, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v8, v2, 16, v0
-; GFX11-NEXT: s_and_b32 s1, s17, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v6, v4
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v7, v5
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v17, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v5
-; GFX11-NEXT: v_bfe_u32 v19, v7, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v17, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v19, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_lshl_b32 s1, s16, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v18, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v18, 0x40c00000, s1
-; GFX11-NEXT: s_and_b32 s0, s16, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v17, v18, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v16, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v7, v0, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v17, v18
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v19, v6, v16
-; GFX11-NEXT: v_lshl_or_b32 v6, v2, 16, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: s_and_b32 s0, s15, 0xffff0000
-; GFX11-NEXT: v_lshl_or_b32 v5, v5, 16, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v19
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v16
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-NEXT: s_lshl_b32 s1, s15, 16
-; GFX11-NEXT: s_and_b32 s0, s14, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v2, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v3, v17, 16, 1
-; GFX11-NEXT: v_bfe_u32 v18, v16, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v4
-; GFX11-NEXT: s_lshl_b32 s0, s14, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v17
-; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v18, v18, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: s_and_b32 s0, s13, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v19, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v18
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v16
-; GFX11-NEXT: v_bfe_u32 v19, v4, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v20, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_dual_cndmask_b32 v16, v17, v18 :: v_dual_add_nc_u32 v17, v19, v4
-; GFX11-NEXT: v_add_f32_e64 v18, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s13, 16
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v4
-; GFX11-NEXT: v_add_f32_e64 v21, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_lshl_b32 s0, s12, 16
-; GFX11-NEXT: v_bfe_u32 v20, v18, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v26, 0x400000, v21
-; GFX11-NEXT: v_or_b32_e32 v25, 0x400000, v18
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v17, v19, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v19, v21, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s12, 0xffff0000
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
-; GFX11-NEXT: v_add_f32_e64 v22, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v23, v17, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v19, v19, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v20, v20, v18
-; GFX11-NEXT: v_or_b32_e32 v27, 0x400000, v17
-; GFX11-NEXT: v_bfe_u32 v24, v22, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v23, v23, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v19, 0x7fff, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v24, v24, v22
-; GFX11-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
-; GFX11-NEXT: v_cndmask_b32_e32 v19, v19, v26, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v24
-; GFX11-NEXT: v_or_b32_e32 v24, 0x400000, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v19
-; GFX11-NEXT: v_cndmask_b32_e32 v17, v23, v27, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v3, v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v17, 16, v17
-; GFX11-NEXT: v_cndmask_b32_e32 v18, v20, v25, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v18
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v21, v24, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v1, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v1, v18, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v20
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v2, v16, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v0, v20, 16, v17
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: s_branch .LBB23_2
-; GFX11-NEXT: .LBB23_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
-; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v32bf16_to_v16i32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s15, s3
+; GFX11-TRUE16-NEXT: s_mov_b32 s14, s2
+; GFX11-TRUE16-NEXT: s_mov_b32 s13, s1
+; GFX11-TRUE16-NEXT: s_mov_b32 s12, s0
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-TRUE16-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s27, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s27, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s26, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s25, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s17, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v6 :: v_dual_add_nc_u32 v7, v7, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v9, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s25, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v1, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v8, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v5, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s24, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v8, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v0.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v2, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v13, 16, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s23, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v0.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v3, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s22, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v12, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v7, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v9, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v2.l
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v5, v9
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v11, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s21, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v8, v5
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v0.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v3, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s20, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v0.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v3, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v7, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v16, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v2.l
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v5, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s18, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v17, v5
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v3, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v16, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v0, v17 :: v_dual_add_nc_u32 v3, v3, v4
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v1.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v2.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v16
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s16, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v17, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v1, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s15, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v4, v18, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, v19, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s14, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s12, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v17, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v16, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, v16, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v3, v18, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, v19, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v18, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s13, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v1.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v16, v20, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v19
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v16, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v18
+; GFX11-TRUE16-NEXT: v_bfe_u32 v22, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-TRUE16-NEXT: v_bfe_u32 v21, v16, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s12, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v20, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v0, v19 :: v_dual_add_nc_u32 v19, v21, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, v22, v17
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_bfe_u32 v18, v20, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 0x7fff, v19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v21
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, v18, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v17, v21, v22, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v17, 16, v17
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v16, v19, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v18, v18, v24, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v17.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB23_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB23_2
+; GFX11-TRUE16-NEXT: .LBB23_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v32bf16_to_v16i32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_mov_b32 s15, s3
+; GFX11-FAKE16-NEXT: s_mov_b32 s14, s2
+; GFX11-FAKE16-NEXT: s_mov_b32 s13, s1
+; GFX11-FAKE16-NEXT: s_mov_b32 s12, s0
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-FAKE16-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s27, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s27, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s26, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s25, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s25, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s24, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v9, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v7 :: v_dual_add_nc_u32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v7, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v5, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s23, 16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v8, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s22, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v8, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s21, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s20, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v8, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v4, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v16, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v1, 16, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v6, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s18, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v2, 16, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v6, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v7, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v17, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v17, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v19, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s16, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v18, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v17, v18, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v16, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v17, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, v6, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v5, 16, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s15, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s14, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v18, v16, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v17
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, v18, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v20, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v16, v17, v18 :: v_dual_add_nc_u32 v17, v19, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s13, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v21, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s12, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v20, v18, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v26, 0x400000, v21
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v18
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v17, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v21, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s12, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v22, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v23, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, v19, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, v20, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v17
+; GFX11-FAKE16-NEXT: v_bfe_u32 v24, v22, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, v23, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, 0x7fff, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, v24, v22
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v19, v19, v26, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v24
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v24, 0x400000, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v19
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v17, v23, v27, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v17, 16, v17
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v18, v20, v25, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v18
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v21, v24, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v18, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v20
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v16, 16, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v20, 16, v17
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB23_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB23_2
+; GFX11-FAKE16-NEXT: .LBB23_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -21906,360 +22242,696 @@ define inreg <16 x float> @bitcast_v32bf16_to_v16f32_scalar(<32 x bfloat> inreg
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v32bf16_to_v16f32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_mov_b32 s15, s3
-; GFX11-NEXT: s_mov_b32 s14, s2
-; GFX11-NEXT: s_mov_b32 s13, s1
-; GFX11-NEXT: s_mov_b32 s12, s0
-; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
-; GFX11-NEXT: .LBB47_2: ; %cmp.true
-; GFX11-NEXT: s_and_b32 s1, s27, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s0, s27, 16
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s26, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s2, s26, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_bfe_u32 v9, v3, 16, 1
-; GFX11-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_and_b32 s1, s25, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s3, s25, 16
-; GFX11-NEXT: s_and_b32 s0, s24, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s24, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v9, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v7 :: v_dual_add_nc_u32 v2, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
-; GFX11-NEXT: v_bfe_u32 v5, v6, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v7, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v5, v6
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v8
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s23, 16
-; GFX11-NEXT: v_lshl_or_b32 v14, v0, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s23, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v8, v7
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: s_lshl_b32 s1, s22, 16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s22, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v8
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v13, v0, 16, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s21, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v8, v7
-; GFX11-NEXT: s_lshl_b32 s1, s21, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v12, v0, 16, v1
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s20, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s20, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v8
-; GFX11-NEXT: v_lshl_or_b32 v11, v0, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v10, v0, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v8, v7
-; GFX11-NEXT: s_lshl_b32 s0, s19, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s19, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v16, v4, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s18, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_bfe_u32 v6, v8, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v16, v4
-; GFX11-NEXT: v_lshl_or_b32 v9, v1, 16, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v6, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v4
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s1, s18, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s0, s17, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v8, v2, 16, v0
-; GFX11-NEXT: s_and_b32 s1, s17, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v6, v4
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v7, v5
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v17, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v5
-; GFX11-NEXT: v_bfe_u32 v19, v7, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v17, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v19, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_lshl_b32 s1, s16, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v18, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v18, 0x40c00000, s1
-; GFX11-NEXT: s_and_b32 s0, s16, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v17, v18, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v16, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v7, v0, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v17, v18
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v19, v6, v16
-; GFX11-NEXT: v_lshl_or_b32 v6, v2, 16, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: s_and_b32 s0, s15, 0xffff0000
-; GFX11-NEXT: v_lshl_or_b32 v5, v5, 16, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v19
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v16
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-NEXT: s_lshl_b32 s1, s15, 16
-; GFX11-NEXT: s_and_b32 s0, s14, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v2, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v3, v17, 16, 1
-; GFX11-NEXT: v_bfe_u32 v18, v16, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v4
-; GFX11-NEXT: s_lshl_b32 s0, s14, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v17
-; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v18, v18, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: s_and_b32 s0, s13, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v19, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v18
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v16
-; GFX11-NEXT: v_bfe_u32 v19, v4, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v20, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_dual_cndmask_b32 v16, v17, v18 :: v_dual_add_nc_u32 v17, v19, v4
-; GFX11-NEXT: v_add_f32_e64 v18, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s13, 16
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v4
-; GFX11-NEXT: v_add_f32_e64 v21, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_lshl_b32 s0, s12, 16
-; GFX11-NEXT: v_bfe_u32 v20, v18, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v26, 0x400000, v21
-; GFX11-NEXT: v_or_b32_e32 v25, 0x400000, v18
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v17, v19, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v19, v21, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s12, 0xffff0000
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
-; GFX11-NEXT: v_add_f32_e64 v22, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v23, v17, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v19, v19, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v20, v20, v18
-; GFX11-NEXT: v_or_b32_e32 v27, 0x400000, v17
-; GFX11-NEXT: v_bfe_u32 v24, v22, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v23, v23, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v19, 0x7fff, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v24, v24, v22
-; GFX11-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
-; GFX11-NEXT: v_cndmask_b32_e32 v19, v19, v26, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v24
-; GFX11-NEXT: v_or_b32_e32 v24, 0x400000, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v19
-; GFX11-NEXT: v_cndmask_b32_e32 v17, v23, v27, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v3, v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v17, 16, v17
-; GFX11-NEXT: v_cndmask_b32_e32 v18, v20, v25, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v18
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v21, v24, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v1, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v1, v18, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v20
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v2, v16, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v0, v20, 16, v17
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
-; GFX11-NEXT: .LBB47_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
-; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v32bf16_to_v16f32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s15, s3
+; GFX11-TRUE16-NEXT: s_mov_b32 s14, s2
+; GFX11-TRUE16-NEXT: s_mov_b32 s13, s1
+; GFX11-TRUE16-NEXT: s_mov_b32 s12, s0
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s27, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s27, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s26, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s25, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s17, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v6 :: v_dual_add_nc_u32 v7, v7, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v9, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s25, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v1, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v8, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v5, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s24, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v8, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v0.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v2, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v13, 16, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s23, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v0.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v3, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s22, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v12, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v7, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v9, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v2.l
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v5, v9
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v11, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s21, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v8, v5
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v0.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v3, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s20, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v0.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v3, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v7, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v16, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v2.l
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v5, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s18, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v17, v5
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v3, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v16, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v0, v17 :: v_dual_add_nc_u32 v3, v3, v4
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v1.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v2.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v16
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s16, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v17, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v1, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s15, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v4, v18, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, v19, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s14, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s12, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v17, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v16, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, v16, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v3, v18, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, v19, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v18, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s13, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v1.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v16, v20, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v19
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v16, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v18
+; GFX11-TRUE16-NEXT: v_bfe_u32 v22, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-TRUE16-NEXT: v_bfe_u32 v21, v16, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s12, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v20, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v0, v19 :: v_dual_add_nc_u32 v19, v21, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, v22, v17
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_bfe_u32 v18, v20, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 0x7fff, v19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v21
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, v18, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v17, v21, v22, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v17, 16, v17
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v16, v19, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v18, v18, v24, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v17.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB47_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB47_2
+; GFX11-TRUE16-NEXT: .LBB47_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v32bf16_to_v16f32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_mov_b32 s15, s3
+; GFX11-FAKE16-NEXT: s_mov_b32 s14, s2
+; GFX11-FAKE16-NEXT: s_mov_b32 s13, s1
+; GFX11-FAKE16-NEXT: s_mov_b32 s12, s0
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-FAKE16-NEXT: .LBB47_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s27, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s27, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s26, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s25, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s25, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s24, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v9, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v7 :: v_dual_add_nc_u32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v7, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v5, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s23, 16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v8, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s22, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v8, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s21, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s20, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v8, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v4, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v16, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v1, 16, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v6, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s18, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v2, 16, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v6, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v7, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v17, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v17, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v19, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s16, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v18, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v17, v18, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v16, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v17, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, v6, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v5, 16, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s15, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s14, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v18, v16, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v17
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, v18, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v20, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v16, v17, v18 :: v_dual_add_nc_u32 v17, v19, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s13, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v21, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s12, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v20, v18, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v26, 0x400000, v21
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v18
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v17, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v21, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s12, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v22, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v23, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, v19, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, v20, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v17
+; GFX11-FAKE16-NEXT: v_bfe_u32 v24, v22, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, v23, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, 0x7fff, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, v24, v22
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v19, v19, v26, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v24
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v24, 0x400000, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v19
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v17, v23, v27, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v17, 16, v17
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v18, v20, v25, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v18
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v21, v24, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v18, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v20
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v16, 16, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v20, 16, v17
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB47_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB47_2
+; GFX11-FAKE16-NEXT: .LBB47_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -35969,360 +36641,696 @@ define inreg <8 x i64> @bitcast_v32bf16_to_v8i64_scalar(<32 x bfloat> inreg %a,
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v32bf16_to_v8i64_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_mov_b32 s15, s3
-; GFX11-NEXT: s_mov_b32 s14, s2
-; GFX11-NEXT: s_mov_b32 s13, s1
-; GFX11-NEXT: s_mov_b32 s12, s0
-; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB67_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB67_4
-; GFX11-NEXT: .LBB67_2: ; %cmp.true
-; GFX11-NEXT: s_and_b32 s1, s27, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s0, s27, 16
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s26, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s2, s26, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_bfe_u32 v9, v3, 16, 1
-; GFX11-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_and_b32 s1, s25, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s3, s25, 16
-; GFX11-NEXT: s_and_b32 s0, s24, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s24, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v9, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v7 :: v_dual_add_nc_u32 v2, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
-; GFX11-NEXT: v_bfe_u32 v5, v6, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v7, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v5, v6
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v8
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s23, 16
-; GFX11-NEXT: v_lshl_or_b32 v14, v0, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s23, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v8, v7
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: s_lshl_b32 s1, s22, 16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s22, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v8
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v13, v0, 16, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s21, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v8, v7
-; GFX11-NEXT: s_lshl_b32 s1, s21, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v12, v0, 16, v1
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s20, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s20, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v8
-; GFX11-NEXT: v_lshl_or_b32 v11, v0, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v10, v0, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v8, v7
-; GFX11-NEXT: s_lshl_b32 s0, s19, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s19, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v16, v4, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s18, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_bfe_u32 v6, v8, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v16, v4
-; GFX11-NEXT: v_lshl_or_b32 v9, v1, 16, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v6, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v4
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s1, s18, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s0, s17, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v8, v2, 16, v0
-; GFX11-NEXT: s_and_b32 s1, s17, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v6, v4
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v7, v5
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v17, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v5
-; GFX11-NEXT: v_bfe_u32 v19, v7, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v17, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v19, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_lshl_b32 s1, s16, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v18, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v18, 0x40c00000, s1
-; GFX11-NEXT: s_and_b32 s0, s16, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v17, v18, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v16, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v7, v0, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v17, v18
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v19, v6, v16
-; GFX11-NEXT: v_lshl_or_b32 v6, v2, 16, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: s_and_b32 s0, s15, 0xffff0000
-; GFX11-NEXT: v_lshl_or_b32 v5, v5, 16, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v19
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v16
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-NEXT: s_lshl_b32 s1, s15, 16
-; GFX11-NEXT: s_and_b32 s0, s14, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v2, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v3, v17, 16, 1
-; GFX11-NEXT: v_bfe_u32 v18, v16, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v4
-; GFX11-NEXT: s_lshl_b32 s0, s14, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v17
-; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v18, v18, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: s_and_b32 s0, s13, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v19, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v18
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v16
-; GFX11-NEXT: v_bfe_u32 v19, v4, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v20, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_dual_cndmask_b32 v16, v17, v18 :: v_dual_add_nc_u32 v17, v19, v4
-; GFX11-NEXT: v_add_f32_e64 v18, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s13, 16
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v4
-; GFX11-NEXT: v_add_f32_e64 v21, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_lshl_b32 s0, s12, 16
-; GFX11-NEXT: v_bfe_u32 v20, v18, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v26, 0x400000, v21
-; GFX11-NEXT: v_or_b32_e32 v25, 0x400000, v18
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v17, v19, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v19, v21, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s12, 0xffff0000
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
-; GFX11-NEXT: v_add_f32_e64 v22, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v23, v17, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v19, v19, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v20, v20, v18
-; GFX11-NEXT: v_or_b32_e32 v27, 0x400000, v17
-; GFX11-NEXT: v_bfe_u32 v24, v22, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v23, v23, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v19, 0x7fff, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v24, v24, v22
-; GFX11-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
-; GFX11-NEXT: v_cndmask_b32_e32 v19, v19, v26, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v24
-; GFX11-NEXT: v_or_b32_e32 v24, 0x400000, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v19
-; GFX11-NEXT: v_cndmask_b32_e32 v17, v23, v27, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v3, v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v17, 16, v17
-; GFX11-NEXT: v_cndmask_b32_e32 v18, v20, v25, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v18
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v21, v24, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v1, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v1, v18, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v20
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v2, v16, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v0, v20, 16, v17
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB67_3:
-; GFX11-NEXT: s_branch .LBB67_2
-; GFX11-NEXT: .LBB67_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
-; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v32bf16_to_v8i64_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s15, s3
+; GFX11-TRUE16-NEXT: s_mov_b32 s14, s2
+; GFX11-TRUE16-NEXT: s_mov_b32 s13, s1
+; GFX11-TRUE16-NEXT: s_mov_b32 s12, s0
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX11-TRUE16-NEXT: .LBB67_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s27, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s27, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s26, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s25, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s17, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v6 :: v_dual_add_nc_u32 v7, v7, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v9, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s25, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v1, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v8, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v5, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s24, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v8, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v0.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v2, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v13, 16, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s23, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v0.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v3, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s22, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v12, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v7, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v9, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v2.l
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v5, v9
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v11, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s21, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v8, v5
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v0.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v3, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s20, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v0.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v3, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v7, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v16, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v2.l
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v5, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s18, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v17, v5
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v3, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v16, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v0, v17 :: v_dual_add_nc_u32 v3, v3, v4
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v1.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v2.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v16
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s16, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v17, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v1, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s15, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v4, v18, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, v19, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s14, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s12, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v17, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v16, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, v16, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v3, v18, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, v19, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v18, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s13, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v1.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v16, v20, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v19
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v16, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v18
+; GFX11-TRUE16-NEXT: v_bfe_u32 v22, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-TRUE16-NEXT: v_bfe_u32 v21, v16, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s12, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v20, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v0, v19 :: v_dual_add_nc_u32 v19, v21, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, v22, v17
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_bfe_u32 v18, v20, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 0x7fff, v19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v21
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, v18, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v17, v21, v22, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v17, 16, v17
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v16, v19, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v18, v18, v24, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v17.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB67_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB67_2
+; GFX11-TRUE16-NEXT: .LBB67_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v32bf16_to_v8i64_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_mov_b32 s15, s3
+; GFX11-FAKE16-NEXT: s_mov_b32 s14, s2
+; GFX11-FAKE16-NEXT: s_mov_b32 s13, s1
+; GFX11-FAKE16-NEXT: s_mov_b32 s12, s0
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX11-FAKE16-NEXT: .LBB67_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s27, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s27, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s26, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s25, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s25, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s24, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v9, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v7 :: v_dual_add_nc_u32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v7, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v5, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s23, 16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v8, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s22, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v8, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s21, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s20, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v8, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v4, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v16, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v1, 16, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v6, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s18, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v2, 16, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v6, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v7, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v17, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v17, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v19, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s16, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v18, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v17, v18, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v16, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v17, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, v6, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v5, 16, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s15, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s14, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v18, v16, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v17
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, v18, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v20, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v16, v17, v18 :: v_dual_add_nc_u32 v17, v19, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s13, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v21, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s12, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v20, v18, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v26, 0x400000, v21
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v18
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v17, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v21, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s12, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v22, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v23, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, v19, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, v20, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v17
+; GFX11-FAKE16-NEXT: v_bfe_u32 v24, v22, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, v23, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, 0x7fff, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, v24, v22
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v19, v19, v26, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v24
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v24, 0x400000, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v19
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v17, v23, v27, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v17, 16, v17
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v18, v20, v25, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v18
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v21, v24, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v18, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v20
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v16, 16, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v20, 16, v17
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB67_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB67_2
+; GFX11-FAKE16-NEXT: .LBB67_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -49092,360 +50100,696 @@ define inreg <8 x double> @bitcast_v32bf16_to_v8f64_scalar(<32 x bfloat> inreg %
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v32bf16_to_v8f64_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_mov_b32 s15, s3
-; GFX11-NEXT: s_mov_b32 s14, s2
-; GFX11-NEXT: s_mov_b32 s13, s1
-; GFX11-NEXT: s_mov_b32 s12, s0
-; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB83_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB83_4
-; GFX11-NEXT: .LBB83_2: ; %cmp.true
-; GFX11-NEXT: s_and_b32 s1, s27, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s0, s27, 16
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s26, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s2, s26, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_bfe_u32 v9, v3, 16, 1
-; GFX11-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_and_b32 s1, s25, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s3, s25, 16
-; GFX11-NEXT: s_and_b32 s0, s24, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s24, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v9, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v7 :: v_dual_add_nc_u32 v2, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
-; GFX11-NEXT: v_bfe_u32 v5, v6, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v7, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v5, v6
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v8
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s23, 16
-; GFX11-NEXT: v_lshl_or_b32 v14, v0, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s23, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v8, v7
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: s_lshl_b32 s1, s22, 16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s22, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v8
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v13, v0, 16, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s21, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v8, v7
-; GFX11-NEXT: s_lshl_b32 s1, s21, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v12, v0, 16, v1
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s20, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s20, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v8
-; GFX11-NEXT: v_lshl_or_b32 v11, v0, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v10, v0, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v8, v7
-; GFX11-NEXT: s_lshl_b32 s0, s19, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s19, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v16, v4, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s18, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_bfe_u32 v6, v8, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v16, v4
-; GFX11-NEXT: v_lshl_or_b32 v9, v1, 16, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v6, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v4
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s1, s18, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s0, s17, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v8, v2, 16, v0
-; GFX11-NEXT: s_and_b32 s1, s17, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v6, v4
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v7, v5
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v17, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v5
-; GFX11-NEXT: v_bfe_u32 v19, v7, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v17, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v19, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_lshl_b32 s1, s16, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v18, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v18, 0x40c00000, s1
-; GFX11-NEXT: s_and_b32 s0, s16, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v17, v18, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v16, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v7, v0, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v17, v18
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v19, v6, v16
-; GFX11-NEXT: v_lshl_or_b32 v6, v2, 16, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: s_and_b32 s0, s15, 0xffff0000
-; GFX11-NEXT: v_lshl_or_b32 v5, v5, 16, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v19
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v16
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-NEXT: s_lshl_b32 s1, s15, 16
-; GFX11-NEXT: s_and_b32 s0, s14, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v2, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v3, v17, 16, 1
-; GFX11-NEXT: v_bfe_u32 v18, v16, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v4
-; GFX11-NEXT: s_lshl_b32 s0, s14, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v17
-; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v18, v18, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: s_and_b32 s0, s13, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v19, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v18
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v16
-; GFX11-NEXT: v_bfe_u32 v19, v4, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v20, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_dual_cndmask_b32 v16, v17, v18 :: v_dual_add_nc_u32 v17, v19, v4
-; GFX11-NEXT: v_add_f32_e64 v18, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s13, 16
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v4
-; GFX11-NEXT: v_add_f32_e64 v21, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_lshl_b32 s0, s12, 16
-; GFX11-NEXT: v_bfe_u32 v20, v18, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v26, 0x400000, v21
-; GFX11-NEXT: v_or_b32_e32 v25, 0x400000, v18
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v17, v19, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v19, v21, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s12, 0xffff0000
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
-; GFX11-NEXT: v_add_f32_e64 v22, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v23, v17, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v19, v19, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v20, v20, v18
-; GFX11-NEXT: v_or_b32_e32 v27, 0x400000, v17
-; GFX11-NEXT: v_bfe_u32 v24, v22, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v23, v23, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v19, 0x7fff, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v24, v24, v22
-; GFX11-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
-; GFX11-NEXT: v_cndmask_b32_e32 v19, v19, v26, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v24
-; GFX11-NEXT: v_or_b32_e32 v24, 0x400000, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v19
-; GFX11-NEXT: v_cndmask_b32_e32 v17, v23, v27, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v3, v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v17, 16, v17
-; GFX11-NEXT: v_cndmask_b32_e32 v18, v20, v25, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v18
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v21, v24, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v1, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v1, v18, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v20
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v2, v16, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v0, v20, 16, v17
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB83_3:
-; GFX11-NEXT: s_branch .LBB83_2
-; GFX11-NEXT: .LBB83_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
-; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v32bf16_to_v8f64_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s15, s3
+; GFX11-TRUE16-NEXT: s_mov_b32 s14, s2
+; GFX11-TRUE16-NEXT: s_mov_b32 s13, s1
+; GFX11-TRUE16-NEXT: s_mov_b32 s12, s0
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX11-TRUE16-NEXT: .LBB83_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s27, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s27, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s26, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s25, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s17, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v6 :: v_dual_add_nc_u32 v7, v7, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v9, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s25, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v1, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v8, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v5, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s24, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v8, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v0.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v2, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v13, 16, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s23, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v0.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v3, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s22, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v12, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v7, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v9, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v2.l
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v5, v9
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v11, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s21, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v8, v5
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v0.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v3, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s20, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v0.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v3, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v7, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v16, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v2.l
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v5, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s18, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v17, v5
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v3, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v16, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v0, v17 :: v_dual_add_nc_u32 v3, v3, v4
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v1.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v2.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v16
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s16, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v17, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v1, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s15, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v4, v18, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, v19, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s14, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s12, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v17, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v16, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, v16, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v3, v18, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, v19, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v18, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s13, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v1.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v16, v20, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v19
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v16, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v18
+; GFX11-TRUE16-NEXT: v_bfe_u32 v22, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-TRUE16-NEXT: v_bfe_u32 v21, v16, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s12, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v20, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v0, v19 :: v_dual_add_nc_u32 v19, v21, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, v22, v17
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_bfe_u32 v18, v20, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 0x7fff, v19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v21
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, v18, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v17, v21, v22, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v17, 16, v17
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v16, v19, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v18, v18, v24, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v17.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB83_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB83_2
+; GFX11-TRUE16-NEXT: .LBB83_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v32bf16_to_v8f64_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_mov_b32 s15, s3
+; GFX11-FAKE16-NEXT: s_mov_b32 s14, s2
+; GFX11-FAKE16-NEXT: s_mov_b32 s13, s1
+; GFX11-FAKE16-NEXT: s_mov_b32 s12, s0
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX11-FAKE16-NEXT: .LBB83_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s27, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s27, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s26, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s25, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s25, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s24, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v9, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v7 :: v_dual_add_nc_u32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v7, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v5, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s23, 16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v8, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s22, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v8, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s21, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s20, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v8, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v4, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v16, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v1, 16, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v6, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s18, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v2, 16, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v6, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v7, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v17, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v17, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v19, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s16, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v18, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v17, v18, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v16, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v17, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, v6, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v5, 16, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s15, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s14, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v18, v16, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v17
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, v18, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v20, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v16, v17, v18 :: v_dual_add_nc_u32 v17, v19, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s13, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v21, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s12, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v20, v18, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v26, 0x400000, v21
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v18
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v17, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v21, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s12, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v22, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v23, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, v19, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, v20, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v17
+; GFX11-FAKE16-NEXT: v_bfe_u32 v24, v22, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, v23, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, 0x7fff, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, v24, v22
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v19, v19, v26, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v24
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v24, 0x400000, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v19
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v17, v23, v27, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v17, 16, v17
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v18, v20, v25, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v18
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v21, v24, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v18, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v20
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v16, 16, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v20, 16, v17
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB83_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB83_2
+; GFX11-FAKE16-NEXT: .LBB83_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -60096,298 +61440,258 @@ define <32 x i16> @bitcast_v32bf16_to_v32i16(<32 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v17, 16, v1
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v16, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v24, 16, v5
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 0x40c00000, v17 :: v_dual_lshlrev_b32 v26, 16, v7
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v16, 0x40c00000, v16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v18, 0x40c00000, v0 :: v_dual_lshlrev_b32 v19, 16, v2
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v28, 16, v9
-; GFX11-TRUE16-NEXT: v_bfe_u32 v21, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v26, 16, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v28, 16, v8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 0x40c00000, v17 :: v_dual_add_f32 v16, 0x40c00000, v16
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v30, 16, v10
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v6
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v16, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v22, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v16, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, 0x400000, v16
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v18
-; GFX11-TRUE16-NEXT: v_add3_u32 v21, v21, v17, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v0, v0, v16, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_add3_u32 v22, v22, v17, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v16, 0x7fff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v17
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v30, 16, v11
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 0x40c00000, v19 :: v_dual_cndmask_b32 v0, v0, v22
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v18, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v22, 0xffff0000, v2
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v32, 16, v13
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v18, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v18, v20, 16, 1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v25, 16, v6
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v27, 16, v8
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v8, 0x40c00000, v8
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v32, 16, v12
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v16, v19, v20, vcc_lo
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 0x40c00000, v1 :: v_dual_lshlrev_b32 v18, 16, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v20, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v10, 0x40c00000, v10
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_add_f32 v3, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v1, 0x7fff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v12, 0x40c00000, v12
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v21, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v21, v21, v0, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v21, v23, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v20
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v29, 16, v10
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v31, 16, v12
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 0x40c00000, v13 :: v_dual_cndmask_b32 v2, v21, v16
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v21, 0x40c00000, v22
-; GFX11-TRUE16-NEXT: v_add3_u32 v16, v18, v20, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v18, v19, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v23, v20, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v16.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v17, v22, v24, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v24, 16, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v22, v18, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v19, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v18
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-TRUE16-NEXT: v_add3_u32 v19, v22, v18, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v22, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v17.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v18, v19, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v19, v22, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v22, v23, v20, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v20
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v19, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v3, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v13
-; GFX11-TRUE16-NEXT: v_bfe_u32 v20, v21, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v16, v16, v17, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v17, v18, v19, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v19
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v21
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v17, v17, v18, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v22, 0x40c00000, v22 :: v_dual_add_f32 v3, 0x40c00000, v3
-; GFX11-TRUE16-NEXT: v_add3_u32 v18, v20, v21, 0x7fff
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v23, 16, v4
-; GFX11-TRUE16-NEXT: v_bfe_u32 v20, v22, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v21, v3, 16, 1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v17, 16, v17
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v18, v18, v19, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
-; GFX11-TRUE16-NEXT: v_add3_u32 v19, v20, v22, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, 0x400000, v22
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v16.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v19, v19, v20, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v20, v21, v3, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v21, 0x40c00000, v24
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v18.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v20, v22, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v24, v21, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v19
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v20, v21, vcc_lo
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v23, 0x40c00000, v23 :: v_dual_add_f32 v4, 0x40c00000, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v22, v23, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v23
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
-; GFX11-TRUE16-NEXT: v_add3_u32 v20, v22, v23, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v22, v4, 16, 1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v20, v20, v21, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v21, v22, v4, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v22, 0x40c00000, v22 :: v_dual_cndmask_b32 v3, v19, v23
+; GFX11-TRUE16-NEXT: v_add3_u32 v19, v24, v21, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v21
+; GFX11-TRUE16-NEXT: v_bfe_u32 v24, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
+; GFX11-TRUE16-NEXT: v_bfe_u32 v25, v22, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v20.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v21, v24, v4, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v19, v19, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v4
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v4, v21, v22 :: v_dual_and_b32 v5, 0xffff0000, v5
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v24, 0x40c00000, v24 :: v_dual_add_f32 v5, 0x40c00000, v5
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v23, v24, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v24
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
-; GFX11-TRUE16-NEXT: v_add3_u32 v21, v23, v24, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v23, v5, 16, 1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v21, v21, v22, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v22, v23, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v24, v25, v22, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, 0x400000, v22
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v21, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v21, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v23, 0x40c00000, v26
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v19.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v21, v21, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v22, v24, v25, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v24, 16, v7
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v26, v23, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v21
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v22, v23, vcc_lo
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v25, 0x40c00000, v25 :: v_dual_add_f32 v6, 0x40c00000, v6
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v5.h
-; GFX11-TRUE16-NEXT: v_bfe_u32 v24, v25, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v25
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_add3_u32 v22, v24, v25, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v24, v6, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v22, v22, v23, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_add3_u32 v23, v24, v6, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v24, 0x40c00000, v24 :: v_dual_cndmask_b32 v5, v21, v25
+; GFX11-TRUE16-NEXT: v_add3_u32 v21, v26, v23, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, 0x400000, v23
+; GFX11-TRUE16-NEXT: v_bfe_u32 v26, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
+; GFX11-TRUE16-NEXT: v_bfe_u32 v27, v24, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v7, 0x40c00000, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add3_u32 v23, v26, v6, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v21, v21, v25, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, 0x400000, v6
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v6, v23, v24 :: v_dual_and_b32 v7, 0xffff0000, v7
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v26, 0x40c00000, v26 :: v_dual_add_f32 v7, 0x40c00000, v7
-; GFX11-TRUE16-NEXT: v_bfe_u32 v25, v26, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v26
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_add3_u32 v23, v25, v26, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v25, v7, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v23, v23, v24, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_add3_u32 v24, v25, v7, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_add3_u32 v26, v27, v24, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, 0x400000, v24
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v23, v25, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v23, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v25, 0x40c00000, v28
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v21.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v23, v23, v7, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v24, v26, v27, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v26, 16, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v28, v25, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v7, v24, v25 :: v_dual_and_b32 v8, 0xffff0000, v8
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v27, 0x40c00000, v27 :: v_dual_add_f32 v8, 0x40c00000, v8
-; GFX11-TRUE16-NEXT: v_bfe_u32 v26, v27, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, 0x400000, v27
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_add3_u32 v24, v26, v27, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v26, v8, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v24, v24, v25, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_add3_u32 v25, v26, v8, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v26, 0x40c00000, v26 :: v_dual_cndmask_b32 v7, v23, v27
+; GFX11-TRUE16-NEXT: v_add3_u32 v23, v28, v25, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, 0x400000, v25
+; GFX11-TRUE16-NEXT: v_bfe_u32 v28, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
+; GFX11-TRUE16-NEXT: v_bfe_u32 v29, v26, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v9, 0x40c00000, v9
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add3_u32 v25, v28, v8, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v23, v23, v27, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, 0x400000, v8
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v8, v25, v26 :: v_dual_and_b32 v9, 0xffff0000, v9
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v28, 0x40c00000, v28 :: v_dual_add_f32 v9, 0x40c00000, v9
-; GFX11-TRUE16-NEXT: v_bfe_u32 v27, v28, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, 0x400000, v28
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_add3_u32 v25, v27, v28, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v27, v9, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v25, v25, v26, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_add3_u32 v26, v27, v9, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_add3_u32 v28, v29, v26, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, 0x400000, v26
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v8, v25, v27, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v25, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v27, 0x40c00000, v30
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v23.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v22.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v25, v25, v9, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v26, v28, v29, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v28, 16, v11
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v30, v27, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v9, v26, v27 :: v_dual_and_b32 v10, 0xffff0000, v10
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v29, 0x40c00000, v29 :: v_dual_add_f32 v10, 0x40c00000, v10
-; GFX11-TRUE16-NEXT: v_bfe_u32 v28, v29, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, 0x400000, v29
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_add3_u32 v26, v28, v29, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v28, v10, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v26, v26, v27, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_add3_u32 v27, v28, v10, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v28, 0x40c00000, v28 :: v_dual_cndmask_b32 v9, v25, v29
+; GFX11-TRUE16-NEXT: v_add3_u32 v25, v30, v27, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, 0x400000, v27
+; GFX11-TRUE16-NEXT: v_bfe_u32 v30, v10, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
+; GFX11-TRUE16-NEXT: v_bfe_u32 v31, v28, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v11, 0x40c00000, v11
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add3_u32 v27, v30, v10, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v25, v25, v29, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, 0x400000, v10
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v10, v27, v28 :: v_dual_and_b32 v11, 0xffff0000, v11
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v30, 0x40c00000, v30 :: v_dual_add_f32 v11, 0x40c00000, v11
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v10.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v29, v30, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, 0x400000, v30
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v26
-; GFX11-TRUE16-NEXT: v_add3_u32 v27, v29, v30, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v29, v11, 16, 1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v6, 16, v10
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v27, v27, v28, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_add3_u32 v28, v29, v11, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_add3_u32 v30, v31, v28, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, 0x400000, v28
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v27, v29, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v27, v11, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v29, 0x40c00000, v32
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v25.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v24.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v27, v27, v11, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v28, v30, v31, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v31, 16, v13
+; GFX11-TRUE16-NEXT: v_bfe_u32 v32, v29, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v11, v28, v29 :: v_dual_and_b32 v12, 0xffff0000, v12
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v31, 0x40c00000, v31 :: v_dual_add_f32 v12, 0x40c00000, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v11.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v30, v31, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v28, v12, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v33, 0x400000, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v11, 16, v27
-; GFX11-TRUE16-NEXT: v_add3_u32 v29, v30, v31, 0x7fff
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v30, 0x40c00000, v32
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v32, 0x400000, v31
-; GFX11-TRUE16-NEXT: v_add3_u32 v28, v28, v12, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v31, v13, 16, 1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v5, 16, v11
-; GFX11-TRUE16-NEXT: v_bfe_u32 v34, v30, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v29, v29, v32, vcc_lo
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v11, v27, v30, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v27, 0x40c00000, v31
+; GFX11-TRUE16-NEXT: v_add3_u32 v30, v32, v29, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, 0x400000, v29
+; GFX11-TRUE16-NEXT: v_bfe_u32 v32, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
+; GFX11-TRUE16-NEXT: v_bfe_u32 v33, v27, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v13, 0x40c00000, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v34, 0x400000, v27
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v29, v30, v31, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v30, v32, v12, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, 0x400000, v12
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-TRUE16-NEXT: v_add3_u32 v31, v31, v13, 0x7fff
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v6.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v22
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v12, v28, v33, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v32, 16, v14
-; GFX11-TRUE16-NEXT: v_add3_u32 v28, v34, v30, 0x7fff
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff0000, v15
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v15, 16, v15
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v13, v31, v35 :: v_dual_add_f32 v32, 0x40c00000, v32
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v33, 0x400000, v30
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v34, 0x40c00000, v34 :: v_dual_add_f32 v15, 0x40c00000, v15
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v13.h
-; GFX11-TRUE16-NEXT: v_bfe_u32 v36, v32, 16, 1
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v12.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v38, v34, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v31, v15, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v39, 0x400000, v15
-; GFX11-TRUE16-NEXT: v_add3_u32 v35, v36, v32, 0x7fff
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v12, 16, v29
-; GFX11-TRUE16-NEXT: v_add3_u32 v31, v31, v15, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v32, v33, v27, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v33, 16, v14
+; GFX11-TRUE16-NEXT: v_bfe_u32 v35, v13, 16, 1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v5, 16, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v16, 16, v21
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v4, 16, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v7.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v12, v30, v31, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v30, 0x40c00000, v33
+; GFX11-TRUE16-NEXT: v_add3_u32 v31, v35, v13, 0x7fff
; GFX11-TRUE16-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v23
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v1.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v37, v14, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v48, 0x400000, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v4, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v21, 16, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add3_u32 v36, v37, v14, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v37, v38, v34, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v38, 0x400000, v34
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v34, v37, v38, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v34.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v15, v31, v39, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, 0x400000, v32
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v15
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v14, v36, v48, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v1, 16, v15
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v14.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v31, v35, v31, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v29.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v27, v32, v34, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v32, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v34, 16, v15
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: v_bfe_u32 v33, v30, 16, 1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v36, 0x400000, v14
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v26.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v13, v31, v32, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v31, 0x40c00000, v34
+; GFX11-TRUE16-NEXT: v_add3_u32 v32, v33, v30, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v33, 0x400000, v30
+; GFX11-TRUE16-NEXT: v_bfe_u32 v34, v14, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v31
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v28, v28, v33, vcc_lo
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v1, 16, v14
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v13, 16, v28
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v9.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v25
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v3, 16, v13
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v8.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v24
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v1, 16, v9
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v4.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v3, 16, v8
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v16, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v1, 16, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v18, 16, v20
+; GFX11-TRUE16-NEXT: v_bfe_u32 v35, v31, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v15, 0x40c00000, v15
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v27.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v30, v32, v33, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v33, v34, v14, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v34, v35, v31, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v31
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX11-TRUE16-NEXT: v_bfe_u32 v32, v15, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v37, 0x400000, v15
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v31, v34, v35, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-TRUE16-NEXT: v_add3_u32 v32, v32, v15, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v14, v33, v36, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v30.h
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v3, 16, v19
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v15, v32, v37, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v31.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v28.h
; GFX11-TRUE16-NEXT: .LBB94_2: ; %end
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -61605,325 +62909,620 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v32bf16_to_v32i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_mov_b32 s15, s3
-; GFX11-NEXT: s_mov_b32 s14, s2
-; GFX11-NEXT: s_mov_b32 s13, s1
-; GFX11-NEXT: s_mov_b32 s12, s0
-; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB95_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB95_4
-; GFX11-NEXT: .LBB95_2: ; %cmp.true
-; GFX11-NEXT: s_and_b32 s0, s12, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s12, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s13, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s1, s13, 16
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v0
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v7, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v4
-; GFX11-NEXT: s_and_b32 s2, s14, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: v_bfe_u32 v9, v5, 16, 1
-; GFX11-NEXT: s_lshl_b32 s0, s14, 16
-; GFX11-NEXT: s_lshl_b32 s1, s27, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v5
-; GFX11-NEXT: v_bfe_u32 v8, v3, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v16, v2, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v9, v5
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v8, v3
-; GFX11-NEXT: s_and_b32 s0, s15, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v7
-; GFX11-NEXT: v_bfe_u32 v7, v9, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v6
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v9
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: s_lshl_b32 s0, s15, 16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v8, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v17, v5, v6, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v9
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_and_b32 s0, s16, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v8, v4
-; GFX11-NEXT: v_bfe_u32 v7, v10, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s16, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v10
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v9, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_and_b32 s0, s17, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v18, v6, v8 :: v_dual_add_nc_u32 v7, v9, v5
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v10
-; GFX11-NEXT: v_bfe_u32 v8, v11, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_lshl_b32 s0, s17, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v8
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v10, v6, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v19, v7, v9, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v11
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: s_and_b32 s0, s18, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v6
-; GFX11-NEXT: v_bfe_u32 v9, v12, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v5, v7, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s18, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v9, v12
-; GFX11-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: s_and_b32 s0, s19, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v20
-; GFX11-NEXT: v_bfe_u32 v9, v11, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v21, v7, v8, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v9, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_lshl_b32 s0, s19, 16
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v12, v7, 16, 1
-; GFX11-NEXT: v_dual_cndmask_b32 v5, v8, v10 :: v_dual_add_nc_u32 v8, 0x7fff, v9
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v11
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: s_and_b32 s0, s20, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v12, v7
-; GFX11-NEXT: v_bfe_u32 v12, v13, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v7
-; GFX11-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s20, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v12, v13
-; GFX11-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v14, v9, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: s_and_b32 s0, s21, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v7, v10, v11 :: v_dual_add_nc_u32 v10, 0x7fff, v12
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v14, v9
-; GFX11-NEXT: v_bfe_u32 v14, v15, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v13, v14, v15
-; GFX11-NEXT: v_cndmask_b32_e32 v22, v10, v11, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v12
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v9
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_lshl_b32 s0, s21, 16
-; GFX11-NEXT: v_bfe_u32 v14, v10, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v23, 0x40c00000, s0
-; GFX11-NEXT: v_dual_cndmask_b32 v24, v11, v12 :: v_dual_add_nc_u32 v9, 0x7fff, v13
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v15
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
-; GFX11-NEXT: s_and_b32 s0, s22, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v14, v10
-; GFX11-NEXT: v_bfe_u32 v13, v23, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v10
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s22, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v13, v13, v23
-; GFX11-NEXT: v_add_f32_e64 v25, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v15, v11, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_and_b32 s0, s23, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v13
-; GFX11-NEXT: v_bfe_u32 v13, v25, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v27, 0x400000, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v26, v12, v14, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v15, v11
-; GFX11-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v23
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
-; GFX11-NEXT: v_add_nc_u32_e32 v13, v13, v25
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
-; GFX11-NEXT: v_bfe_u32 v28, v14, 16, 1
-; GFX11-NEXT: s_lshl_b32 s0, s23, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v23, v10, v15, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v13
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v25
-; GFX11-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s24, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v10, v12, v27, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v28, v14
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
-; GFX11-NEXT: v_add_f32_e64 v27, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v28, 0x400000, v14
-; GFX11-NEXT: v_bfe_u32 v29, v15, 16, 1
-; GFX11-NEXT: v_dual_cndmask_b32 v11, v11, v13 :: v_dual_add_nc_u32 v12, 0x7fff, v12
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-NEXT: s_lshl_b32 s0, s24, 16
-; GFX11-NEXT: v_bfe_u32 v13, v27, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s25, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v25, v12, v28 :: v_dual_add_nc_u32 v12, v29, v15
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v13, v13, v27
-; GFX11-NEXT: v_add_f32_e64 v28, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v29, 0x400000, v15
-; GFX11-NEXT: v_bfe_u32 v30, v14, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
-; GFX11-NEXT: v_or_b32_e32 v31, 0x400000, v27
-; GFX11-NEXT: v_bfe_u32 v32, v28, 16, 1
-; GFX11-NEXT: v_dual_cndmask_b32 v12, v12, v29 :: v_dual_add_nc_u32 v15, v30, v14
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
-; GFX11-NEXT: v_or_b32_e32 v30, 0x400000, v14
-; GFX11-NEXT: s_lshl_b32 s0, s25, 16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
-; GFX11-NEXT: v_add_f32_e64 v29, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v27, v13, v31, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v13, v32, v28
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-NEXT: v_or_b32_e32 v31, 0x400000, v28
-; GFX11-NEXT: s_and_b32 s0, s26, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v32, v29, 16, 1
-; GFX11-NEXT: v_dual_cndmask_b32 v14, v15, v30 :: v_dual_add_nc_u32 v13, 0x7fff, v13
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
-; GFX11-NEXT: v_add_f32_e64 v33, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s26, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v15, v32, v29
-; GFX11-NEXT: v_add_f32_e64 v30, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v13, v31, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v31, 0x40c00000, s1
-; GFX11-NEXT: s_and_b32 s0, s27, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v28, v33, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v32, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v34, v30, 16, 1
-; GFX11-NEXT: v_bfe_u32 v35, v31, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v31
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
-; GFX11-NEXT: v_bfe_u32 v37, v32, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v34, v30
-; GFX11-NEXT: v_add_nc_u32_e32 v35, v35, v31
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v30
-; GFX11-NEXT: v_or_b32_e32 v49, 0x400000, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v37, v37, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v35, 0x7fff, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
-; GFX11-NEXT: v_add_nc_u32_e32 v28, v28, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v37
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v29
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v35, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
-; GFX11-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v28
-; GFX11-NEXT: v_or_b32_e32 v38, 0x400000, v33
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_cndmask_b32_e32 v30, v34, v48, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v22
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v30
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v37, v49, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
-; GFX11-NEXT: v_and_or_b32 v7, 0xffff0000, v7, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v29, v15, v36, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX11-NEXT: v_and_or_b32 v15, 0xffff0000, v32, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v28, v28, v38, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v29
-; GFX11-NEXT: v_and_or_b32 v12, 0xffff0000, v27, v31
-; GFX11-NEXT: v_and_or_b32 v11, 0xffff0000, v25, v32
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v9
-; GFX11-NEXT: v_and_or_b32 v14, 0xffff0000, v28, v30
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v6
-; GFX11-NEXT: v_and_or_b32 v9, 0xffff0000, v26, v23
-; GFX11-NEXT: v_and_or_b32 v8, 0xffff0000, v24, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v2
-; GFX11-NEXT: v_and_or_b32 v6, 0xffff0000, v5, v27
-; GFX11-NEXT: v_and_or_b32 v5, 0xffff0000, v21, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v1
-; GFX11-NEXT: v_and_or_b32 v13, 0xffff0000, v13, v29
-; GFX11-NEXT: v_and_or_b32 v10, 0xffff0000, v10, v33
-; GFX11-NEXT: v_and_or_b32 v4, 0xffff0000, v19, v20
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v18, v21
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v17, v22
-; GFX11-NEXT: v_and_or_b32 v1, 0xffff0000, v16, v23
-; GFX11-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v24
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB95_3:
-; GFX11-NEXT: s_branch .LBB95_2
-; GFX11-NEXT: .LBB95_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
-; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v32bf16_to_v32i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s15, s3
+; GFX11-TRUE16-NEXT: s_mov_b32 s14, s2
+; GFX11-TRUE16-NEXT: s_mov_b32 s13, s1
+; GFX11-TRUE16-NEXT: s_mov_b32 s12, s0
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX11-TRUE16-NEXT: .LBB95_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s12, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s12, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s25, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s13, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s14, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v3, v7 :: v_dual_add_nc_u32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v10, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v16, v5, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v16.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v6, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s15, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v17, v2, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v2, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v10, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v18, v3, v7 :: v_dual_add_nc_u32 v5, v5, v10
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s16, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v4, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v11, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v11
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v19, v4, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, v4, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v8
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v4, v5, v9 :: v_dual_add_nc_u32 v5, 0x7fff, v10
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v9, v12
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s18, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v20, v5, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v13, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v12
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v19.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v20.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v5, v6, v11 :: v_dual_add_nc_u32 v6, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v13
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v21, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v10, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, v6, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v10
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v6, v7, v11 :: v_dual_add_nc_u32 v7, 0x7fff, v12
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v14, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s20, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v11, v14
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v15, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v22, v7, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v14
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v21.h
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v9, v15
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v22.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v7, v8, v13 :: v_dual_add_nc_u32 v8, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s21, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v23, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v11, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v15
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v24, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, v8, v11
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, v10, v12
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v12
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v8, v9, v13 :: v_dual_add_nc_u32 v9, 0x7fff, v14
+; GFX11-TRUE16-NEXT: v_bfe_u32 v13, v24, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v25, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, v13, v24
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s22, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v26, v9, v14, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v25, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v24
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v23.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v26.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v9, v10, v15 :: v_dual_add_nc_u32 v10, 0x7fff, v13
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v11, v25
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s23, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v24, v10, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v13, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v25
+; GFX11-TRUE16-NEXT: v_bfe_u32 v15, v14, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v28, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v27, v10, v13
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, 0x400000, v14
+; GFX11-TRUE16-NEXT: v_bfe_u32 v25, v28, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v24.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v11, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v15, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v27
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v27, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v25, v25, v28
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s24, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v30, v12, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-TRUE16-NEXT: v_bfe_u32 v12, v27, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v25
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, 0x400000, v28
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v11, v11, v29, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, v12, v27
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v25, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s25, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, 0x400000, v27
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v28, v13, v14, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v13, v15, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v25, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v31, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, v13, v15
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v32, 0x400000, v25
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, v14, v25
+; GFX11-TRUE16-NEXT: v_bfe_u32 v27, v31, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v12, v12, v29, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, 0x400000, v15
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v27, v27, v31
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v28.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v29, v13, v29, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s26, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v29.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v13, v14, v32 :: v_dual_add_nc_u32 v14, 0x7fff, v27
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v27, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s27, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v32, 0x400000, v31
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v33, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX11-TRUE16-NEXT: v_bfe_u32 v34, v27, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s27, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v25, v15, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v36, v33, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v31, v14, v32, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, v34, v27
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v35, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v37, 0x400000, v27
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v34, v36, v33
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v25, v25, v15
+; GFX11-TRUE16-NEXT: v_bfe_u32 v32, v35, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v38, 0x400000, v33
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v27, v14, v37, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v25, 0x7fff, v25
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v36, 0x400000, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v32, v32, v35
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v37, 0x400000, v35
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v33, v34, v38, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v31.h
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v18.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v17.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v14, v25, v36, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v27.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v15, v32, v37, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v33.h
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB95_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB95_2
+; GFX11-TRUE16-NEXT: .LBB95_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v32bf16_to_v32i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_mov_b32 s15, s3
+; GFX11-FAKE16-NEXT: s_mov_b32 s14, s2
+; GFX11-FAKE16-NEXT: s_mov_b32 s13, s1
+; GFX11-FAKE16-NEXT: s_mov_b32 s12, s0
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX11-FAKE16-NEXT: .LBB95_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s12, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s12, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s13, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v4
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s14, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s27, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v16, v2, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v9, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v8, v3
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v9, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v9
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s15, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v17, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v8, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s16, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v10
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v18, v6, v8 :: v_dual_add_nc_u32 v7, v9, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v19, v7, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v5, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s18, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v9, v12
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v20
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v21, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v9, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v5, v8, v10 :: v_dual_add_nc_u32 v8, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v12, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s20, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v12, v13
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v14, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v7, v10, v11 :: v_dual_add_nc_u32 v10, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v14, v9
+; GFX11-FAKE16-NEXT: v_bfe_u32 v14, v15, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, v14, v15
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v22, v10, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s21, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v14, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v23, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v24, v11, v12 :: v_dual_add_nc_u32 v9, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v15
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v14, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v13, v23, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s22, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, v13, v23
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v25, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v15, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_bfe_u32 v13, v25, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v26, v12, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v15, v11
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v23
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, v13, v25
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_bfe_u32 v28, v14, 16, 1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s23, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v23, v10, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v25
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v12, v27, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v28, v14
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v27, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v28, 0x400000, v14
+; GFX11-FAKE16-NEXT: v_bfe_u32 v29, v15, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v11, v11, v13 :: v_dual_add_nc_u32 v12, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s24, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v13, v27, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s25, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v25, v12, v28 :: v_dual_add_nc_u32 v12, v29, v15
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, v13, v27
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v28, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v29, 0x400000, v15
+; GFX11-FAKE16-NEXT: v_bfe_u32 v30, v14, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v31, 0x400000, v27
+; GFX11-FAKE16-NEXT: v_bfe_u32 v32, v28, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v12, v12, v29 :: v_dual_add_nc_u32 v15, v30, v14
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v30, 0x400000, v14
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s25, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v29, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v27, v13, v31, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, v32, v28
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v31, 0x400000, v28
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v32, v29, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v14, v15, v30 :: v_dual_add_nc_u32 v13, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v33, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s26, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, v32, v29
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v30, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v13, v31, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v31, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s27, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v28, v33, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v32, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v34, v30, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v31, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v31
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v32, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v34, v30
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v35, v35, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v30
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v49, 0x400000, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, v37, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v35, 0x7fff, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, v28, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v37
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v29
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v35, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v28
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v38, 0x400000, v33
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v30, v34, v48, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v22
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v30
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v37, v49, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
+; GFX11-FAKE16-NEXT: v_and_or_b32 v7, 0xffff0000, v7, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v29, v15, v36, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX11-FAKE16-NEXT: v_and_or_b32 v15, 0xffff0000, v32, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v28, v28, v38, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v29
+; GFX11-FAKE16-NEXT: v_and_or_b32 v12, 0xffff0000, v27, v31
+; GFX11-FAKE16-NEXT: v_and_or_b32 v11, 0xffff0000, v25, v32
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v9
+; GFX11-FAKE16-NEXT: v_and_or_b32 v14, 0xffff0000, v28, v30
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v6
+; GFX11-FAKE16-NEXT: v_and_or_b32 v9, 0xffff0000, v26, v23
+; GFX11-FAKE16-NEXT: v_and_or_b32 v8, 0xffff0000, v24, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v6, 0xffff0000, v5, v27
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, 0xffff0000, v21, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v1
+; GFX11-FAKE16-NEXT: v_and_or_b32 v13, 0xffff0000, v13, v29
+; GFX11-FAKE16-NEXT: v_and_or_b32 v10, 0xffff0000, v10, v33
+; GFX11-FAKE16-NEXT: v_and_or_b32 v4, 0xffff0000, v19, v20
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v18, v21
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v17, v22
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v16, v23
+; GFX11-FAKE16-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v24
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB95_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB95_2
+; GFX11-FAKE16-NEXT: .LBB95_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -69965,170 +71564,106 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s17, 8
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s21, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s23, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s27, 8
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v35
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
; GFX11-TRUE16-NEXT: s_and_b32 s10, s28, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s29, 8
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v39
-; GFX11-TRUE16-NEXT: s_or_b32 s10, s10, s11
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v1, 0xffff, s10
-; GFX11-TRUE16-NEXT: s_lshl_b32 s12, s27, 8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v38
-; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v31
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s9 :: v_dual_and_b32 v1, 0xff, v38
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v39
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v0, 16, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v31
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v48
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(5)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xff, v82
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v33
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v48
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v36
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v2, v49
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v32
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v51
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v5, v50
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v6, v52
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v53
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xff, v24
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xff, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v3, 16, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v34
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v7, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v18
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v8, 16, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v54
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v20
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v22
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v55
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xff, v67
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v19
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v8, v23
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v2, 16, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v9, v21
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v26
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v3, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v30
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xff, v80
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v82
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v2, v49
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v50
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v36
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v0, v51
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v32
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v52
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v34
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v0, v53
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v20
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v17
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v18
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v3, v54
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v19
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v24
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v1, v55
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v22
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v28
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v27
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v1, 16, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v2, v21
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v26
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v3, v23
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v65
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v66
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v11, v70
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v12, v71
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xff, v69
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v13, v83
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v86, 0xff, v64
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v14, v84
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v65
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v25
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v29
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v87, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v96, v12, v81
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v97, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v86, v86, v85
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v98, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v0, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v1, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v15, 16, v87
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v96, 16, v97
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v86, 16, v98
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v30
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v2, v27
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v29
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v68
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v1, v66
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v67
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v69
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v0, v70
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v64
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v80
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v71
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v81
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v15, v84
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v85
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v3, v83
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s5
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB99_3
; GFX11-TRUE16-NEXT: .LBB99_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v68
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v67
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v30
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v65
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v22
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v16
; GFX11-TRUE16-NEXT: s_add_i32 s28, s28, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v70, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v71, v5
-; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
; GFX11-TRUE16-NEXT: s_add_i32 s24, s24, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v66, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v26
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v28
; GFX11-TRUE16-NEXT: s_or_b32 s4, s5, s4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v29, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v24
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v27, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v25, v6
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v23, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v18
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v34
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v21, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v20
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
; GFX11-TRUE16-NEXT: s_and_b32 s5, s24, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s25, 8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v55, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v9
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v32
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v19, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x300, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v54, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v17, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v9
; GFX11-TRUE16-NEXT: s_add_i32 s26, s26, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v36
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 0x300, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v53, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v33
; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s5
; GFX11-TRUE16-NEXT: s_and_b32 s6, s26, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s27, 8
@@ -70137,11 +71672,6 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
; GFX11-TRUE16-NEXT: s_add_i32 s22, s22, 3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v31
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v37
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v7
; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s7
; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
@@ -70159,83 +71689,133 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 8
; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 8
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v52, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v20
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v38
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v51, v4
; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s10
; GFX11-TRUE16-NEXT: s_or_b32 s0, s1, s0
; GFX11-TRUE16-NEXT: s_or_b32 s1, s3, s2
-; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
-; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s9, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s0, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s1, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s10, 0x300
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v50, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v49, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v20
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v35
+; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v64
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(5)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v82
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v80
+; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s8, 0x300
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v64
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v69
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v80
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v68
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v67
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v84, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v85, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v28
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v83, v2
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 0x300, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v30
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v22
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v69
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x300, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v70, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v65
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_add_nc_u32 v13, 0x300, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v66, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v26
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v48, v8
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v39, v4
-; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v85, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v71, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x300, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v29, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v24
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v27, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v25, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v23, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v18
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v21, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v20
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v34
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x300, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v55, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v32
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v19, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v54, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v17, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v36
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v53, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v33
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 3, v31
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v37
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v52, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v19
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xff, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v38
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v51, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v50, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v49, v19
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xff, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 3, v35
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v24, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v19
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v48, v20
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xff, v23
; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v81, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v8, 0xffff, s4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v24.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v39, v19
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v4, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v7, 16, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v20
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v21
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v9, 16, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v23, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v22, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v19, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v10, 16, v20
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v18, 16, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v14, 16, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v13, 16, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v3, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v20.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_add_nc_u32 v19, 0x300, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v17.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v16.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v18.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v19.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v3.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s3
; GFX11-TRUE16-NEXT: .LBB99_3: ; %end
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -73991,358 +75571,687 @@ define inreg <32 x half> @bitcast_v32bf16_to_v32f16_scalar(<32 x bfloat> inreg %
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v32bf16_to_v32f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_mov_b32 s15, s3
-; GFX11-NEXT: s_mov_b32 s14, s2
-; GFX11-NEXT: s_mov_b32 s13, s1
-; GFX11-NEXT: s_mov_b32 s12, s0
-; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB103_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB103_4
-; GFX11-NEXT: .LBB103_2: ; %cmp.true
-; GFX11-NEXT: s_and_b32 s0, s12, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s12, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s13, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s1, s13, 16
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v7, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v4
-; GFX11-NEXT: v_bfe_u32 v9, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: s_and_b32 s2, s14, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s0, s14, 16
-; GFX11-NEXT: s_and_b32 s1, s27, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s2
-; GFX11-NEXT: v_add_f32_e64 v33, 0x40c00000, s1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v9, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v5
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s15, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v6, v2 :: v_dual_add_nc_u32 v3, 0x7fff, v3
-; GFX11-NEXT: v_bfe_u32 v6, v7, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s15, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v2
-; GFX11-NEXT: v_or_b32_e32 v38, 0x400000, v33
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v3, v4 :: v_dual_add_nc_u32 v3, v6, v7
-; GFX11-NEXT: v_bfe_u32 v4, v8, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v7
-; GFX11-NEXT: v_bfe_u32 v9, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v8
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v9, v5
-; GFX11-NEXT: v_bfe_u32 v36, v33, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v17, 16, v3
-; GFX11-NEXT: v_bfe_u32 v3, v6, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s16, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v4, v4, v7 :: v_dual_add_nc_u32 v7, 0x7fff, v9
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v3, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_lshl_b32 s0, s16, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v9
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v6
-; GFX11-NEXT: v_bfe_u32 v9, v4, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v5
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: s_and_b32 s0, s17, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v9, v4
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v5
-; GFX11-NEXT: v_bfe_u32 v10, v9, 16, 1
-; GFX11-NEXT: s_lshl_b32 s0, s17, 16
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v4, v6, v7 :: v_dual_add_nc_u32 v7, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s18, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v9
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v9
-; GFX11-NEXT: v_bfe_u32 v11, v6, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v5
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s18, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v11, v6
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v5
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v7, v7, v8 :: v_dual_add_nc_u32 v8, 0x7fff, v10
-; GFX11-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_lshl_or_b32 v4, v4, 16, v20
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v7
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s19, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v5
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v9, v7, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_lshl_b32 s0, s19, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
-; GFX11-NEXT: v_bfe_u32 v12, v10, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v9, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v11, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v12, v10
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_bfe_u32 v13, v8, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s20, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v9, v11, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v12
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v10
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v13, v8
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s20, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v9, v9, v11 :: v_dual_add_nc_u32 v10, 0x7fff, v12
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v8
-; GFX11-NEXT: v_bfe_u32 v12, v13, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v9
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s21, 0xffff0000
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v13
-; GFX11-NEXT: v_cndmask_b32_e32 v8, v10, v11, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v12, v13
-; GFX11-NEXT: v_bfe_u32 v11, v9, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: s_lshl_b32 s0, s21, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v11, v9
-; GFX11-NEXT: v_bfe_u32 v15, v12, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s22, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v10, v10, v14 :: v_dual_add_nc_u32 v11, 0x7fff, v11
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v15, v15, v12
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v10
-; GFX11-NEXT: v_bfe_u32 v10, v13, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v11, v14, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v15
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v12
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v13
-; GFX11-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s22, 16
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v13
-; GFX11-NEXT: v_cndmask_b32_e32 v11, v11, v14, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_bfe_u32 v14, v15, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v11
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s23, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v10, v10, v12, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v14, v15
-; GFX11-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v13, v11, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v12
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v15
-; GFX11-NEXT: v_add_nc_u32_e32 v13, v13, v11
-; GFX11-NEXT: v_bfe_u32 v26, v14, 16, 1
-; GFX11-NEXT: s_lshl_b32 s0, s23, 16
-; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v10, v10, v12, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v26, v26, v14
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: s_and_b32 s0, s24, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v27, v12, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_cndmask_b32_e32 v11, v13, v15, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v26
-; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v14
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v26, v27, v12
-; GFX11-NEXT: v_add_f32_e64 v27, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s24, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v13, v13, v15 :: v_dual_add_nc_u32 v14, 0x7fff, v26
-; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v12
-; GFX11-NEXT: v_bfe_u32 v26, v27, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v13
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s25, 0xffff0000
-; GFX11-NEXT: v_or_b32_e32 v29, 0x400000, v27
-; GFX11-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v26, v27
-; GFX11-NEXT: v_bfe_u32 v15, v13, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v26, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
-; GFX11-NEXT: s_lshl_b32 s0, s25, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v15, v15, v13
-; GFX11-NEXT: v_bfe_u32 v30, v26, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v27, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s26, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v14, v14, v29 :: v_dual_add_nc_u32 v15, 0x7fff, v15
-; GFX11-NEXT: v_or_b32_e32 v29, 0x400000, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v30, v30, v26
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v14
-; GFX11-NEXT: v_bfe_u32 v14, v27, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v32, 0x400000, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v15, v29, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v30
-; GFX11-NEXT: v_or_b32_e32 v29, 0x400000, v26
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
-; GFX11-NEXT: v_add_f32_e64 v30, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s27, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v14, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v13
-; GFX11-NEXT: v_cndmask_b32_e32 v15, v15, v29, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v29, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s26, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
-; GFX11-NEXT: v_add_f32_e64 v35, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
-; GFX11-NEXT: v_bfe_u32 v34, v29, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v37, 0x400000, v29
-; GFX11-NEXT: v_bfe_u32 v26, v30, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v14, v14, v32, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v27, v34, v29
-; GFX11-NEXT: v_bfe_u32 v32, v35, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v36, v33
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
-; GFX11-NEXT: v_add_nc_u32_e32 v26, v26, v30
-; GFX11-NEXT: v_add_nc_u32_e32 v27, 0x7fff, v27
-; GFX11-NEXT: v_add_nc_u32_e32 v32, v32, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v30
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v27, v27, v37 :: v_dual_add_nc_u32 v26, 0x7fff, v26
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v29, 0x7fff, v32
-; GFX11-NEXT: v_or_b32_e32 v32, 0x400000, v35
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v27
-; GFX11-NEXT: v_cndmask_b32_e32 v33, v34, v38, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-NEXT: v_cndmask_b32_e32 v29, v29, v32, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v33
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v15
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v29
-; GFX11-NEXT: v_cndmask_b32_e32 v26, v26, v36, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v15, v30, 16, v27
-; GFX11-NEXT: v_and_b32_e32 v30, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v11, v28, 16, v33
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v26
-; GFX11-NEXT: v_and_b32_e32 v29, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v12, v31, 16, v30
-; GFX11-NEXT: v_lshl_or_b32 v10, v10, 16, v34
-; GFX11-NEXT: v_lshl_or_b32 v14, v26, 16, v27
-; GFX11-NEXT: v_lshl_or_b32 v13, v32, 16, v29
-; GFX11-NEXT: v_and_b32_e32 v26, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v29, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v6, v5, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v9, v24, 16, v25
-; GFX11-NEXT: v_lshl_or_b32 v8, v23, 16, v26
-; GFX11-NEXT: v_lshl_or_b32 v7, v22, 16, v27
-; GFX11-NEXT: v_lshl_or_b32 v5, v21, 16, v29
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v3, v18, 16, v19
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v2, v17, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v1, v16, 16, v22
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v0, v0, 16, v23
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB103_3:
-; GFX11-NEXT: s_branch .LBB103_2
-; GFX11-NEXT: .LBB103_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
-; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v32bf16_to_v32f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s15, s3
+; GFX11-TRUE16-NEXT: s_mov_b32 s14, s2
+; GFX11-TRUE16-NEXT: s_mov_b32 s13, s1
+; GFX11-TRUE16-NEXT: s_mov_b32 s12, s0
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX11-TRUE16-NEXT: .LBB103_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s12, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s12, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s13, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s14, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s25, 0xffff0000
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v6 :: v_dual_add_nc_u32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v16, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v9, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v29, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s27, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v6, v2 :: v_dual_add_nc_u32 v2, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v17, 16, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v33, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s15, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, 0x400000, v29
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s16, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v34, v33, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v17.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v2, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v34, v34, v33
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v18.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v3, v6, v7 :: v_dual_add_nc_u32 v4, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v3, v4, v6 :: v_dual_add_nc_u32 v4, v7, v9
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, v10, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v7, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v5, v5, v9 :: v_dual_add_nc_u32 v8, 0x7fff, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, v4, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s18, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v10
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v11, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v10, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v5, v6, v8 :: v_dual_add_nc_u32 v6, v9, v11
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v10
+; GFX11-TRUE16-NEXT: v_bfe_u32 v12, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v6, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, v12, v8
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v9, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v7, v7, v11 :: v_dual_add_nc_u32 v10, 0x7fff, v12
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, v6, v9
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v7
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s20, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v12
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v7, v10, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v13, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v22.l
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v7, v8, v10 :: v_dual_add_nc_u32 v8, v11, v13
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s21, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v9, v12
+; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v10, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v8, v8, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, v14, v10
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v11, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v9, v9, v13 :: v_dual_add_nc_u32 v12, 0x7fff, v14
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v10
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, v8, v11
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v9
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s22, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v14
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v9, v12, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_bfe_u32 v13, v15, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v14, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v24.l
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v9, v10, v12 :: v_dual_add_nc_u32 v10, v13, v15
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v15
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s23, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v11, v14
+; GFX11-TRUE16-NEXT: v_bfe_u32 v26, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v10, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v26, v26, v12
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v10
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v13, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v11, v11, v15 :: v_dual_add_nc_u32 v14, 0x7fff, v26
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v12
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v26, v10, v13
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v28, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s24, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v26
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v26, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v11, v14, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_bfe_u32 v15, v28, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s25, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v11
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v27.l
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v12, v12, v14 :: v_dual_add_nc_u32 v13, v15, v28
+; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v26, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v15, v29, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v25.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v11, 16, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v28
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, v14, v26
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, v15, v29
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, 0x400000, v26
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v30.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
+; GFX11-TRUE16-NEXT: v_bfe_u32 v32, v13, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v26, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v14, v14, v28, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s26, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v28, v32, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v12, 16, v14
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v15, v15, v31, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v31, v26, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v28
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v28, v31, v26
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v31, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s27, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v29.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v13, v14, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v15, v31, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v28
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v13, 16, v13
+; GFX11-TRUE16-NEXT: v_bfe_u32 v36, v14, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, v15, v31
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v37, 0x400000, v14
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v26, v28, v35, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v33
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v28, v36, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v36, 0x400000, v31
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v26
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v33, v34, v35, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v28
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v23.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v33
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v15, v15, v36, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v20.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v19.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v15
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v28, v28, v37, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v16.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v28
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v31.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB103_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB103_2
+; GFX11-TRUE16-NEXT: .LBB103_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v32bf16_to_v32f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_mov_b32 s15, s3
+; GFX11-FAKE16-NEXT: s_mov_b32 s14, s2
+; GFX11-FAKE16-NEXT: s_mov_b32 s13, s1
+; GFX11-FAKE16-NEXT: s_mov_b32 s12, s0
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX11-FAKE16-NEXT: .LBB103_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s12, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s12, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s13, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s14, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s27, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v33, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v9, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v6, v2 :: v_dual_add_nc_u32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s15, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v38, 0x400000, v33
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v3, v4 :: v_dual_add_nc_u32 v3, v6, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v9, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v36, v33, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v17, 16, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v4, v4, v7 :: v_dual_add_nc_u32 v7, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v3, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s16, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v9, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v9, 16, 1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v4, v6, v7 :: v_dual_add_nc_u32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v9
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s18, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v11, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v7, v7, v8 :: v_dual_add_nc_u32 v8, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v4, 16, v20
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v9, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v12, v10
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v13, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v9, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v13, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s20, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v9, v9, v11 :: v_dual_add_nc_u32 v10, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v8, v10, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v12, v13
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s21, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v11, v9
+; GFX11-FAKE16-NEXT: v_bfe_u32 v15, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v10, v10, v14 :: v_dual_add_nc_u32 v11, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, v15, v12
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v11, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v13
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s22, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v11, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v14, v15, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v11
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v14, v15
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v13, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v15
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, v13, v11
+; GFX11-FAKE16-NEXT: v_bfe_u32 v26, v14, 16, 1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s23, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v26, v26, v14
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v27, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v13, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v26
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v14
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v26, v27, v12
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v27, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s24, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v13, v13, v15 :: v_dual_add_nc_u32 v14, 0x7fff, v26
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_bfe_u32 v26, v27, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v13
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s25, 0xffff0000
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v29, 0x400000, v27
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v26, v27
+; GFX11-FAKE16-NEXT: v_bfe_u32 v15, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v26, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s25, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, v15, v13
+; GFX11-FAKE16-NEXT: v_bfe_u32 v30, v26, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v27, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v14, v14, v29 :: v_dual_add_nc_u32 v15, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v29, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v30, v30, v26
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v14
+; GFX11-FAKE16-NEXT: v_bfe_u32 v14, v27, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v32, 0x400000, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v15, v29, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v30
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v29, 0x400000, v26
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v30, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s27, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v14, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v13
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v15, v15, v29, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v29, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s26, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v35, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
+; GFX11-FAKE16-NEXT: v_bfe_u32 v34, v29, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v37, 0x400000, v29
+; GFX11-FAKE16-NEXT: v_bfe_u32 v26, v30, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v14, v14, v32, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, v34, v29
+; GFX11-FAKE16-NEXT: v_bfe_u32 v32, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v36, v33
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v26, v26, v30
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, 0x7fff, v27
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, v32, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v30
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v27, v27, v37 :: v_dual_add_nc_u32 v26, 0x7fff, v26
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v29, 0x7fff, v32
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v32, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v27
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v33, v34, v38, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v29, v29, v32, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v33
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v29
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v26, v26, v36, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v30, 16, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v30, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v28, 16, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v26
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v29, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v31, 16, v30
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v10, 16, v34
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v26, 16, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v32, 16, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v29, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v5, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v24, 16, v25
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v23, 16, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v22, 16, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v21, 16, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v18, 16, v19
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v17, 16, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v16, 16, v22
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v0, 16, v23
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB103_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB103_2
+; GFX11-FAKE16-NEXT: .LBB103_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -81928,170 +83837,106 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s17, 8
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s21, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s23, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s27, 8
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v35
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
; GFX11-TRUE16-NEXT: s_and_b32 s10, s28, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s29, 8
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v39
-; GFX11-TRUE16-NEXT: s_or_b32 s10, s10, s11
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v1, 0xffff, s10
-; GFX11-TRUE16-NEXT: s_lshl_b32 s12, s27, 8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v38
-; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v31
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s9 :: v_dual_and_b32 v1, 0xff, v38
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v39
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v0, 16, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v31
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v48
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(5)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xff, v82
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v33
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v48
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v36
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v2, v49
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v32
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v51
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v5, v50
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v6, v52
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v53
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xff, v24
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xff, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v3, 16, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v34
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v7, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v18
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v8, 16, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v54
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v20
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v22
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v55
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xff, v67
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v19
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v8, v23
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v2, 16, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v9, v21
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v26
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v3, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v30
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xff, v80
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v82
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v2, v49
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v50
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v36
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v0, v51
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v32
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v52
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v34
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v0, v53
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v20
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v17
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v18
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v3, v54
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v19
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v24
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v1, v55
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v22
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v28
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v27
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v1, 16, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v2, v21
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v26
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v3, v23
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v65
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v66
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v11, v70
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v12, v71
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xff, v69
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v13, v83
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v86, 0xff, v64
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v14, v84
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v65
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v25
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v29
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v87, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v96, v12, v81
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v97, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v86, v86, v85
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v98, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v0, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v1, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v15, 16, v87
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v96, 16, v97
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v86, 16, v98
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v30
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v2, v27
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v29
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v68
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v1, v66
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v67
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v69
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v0, v70
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v64
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v80
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v71
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v81
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v15, v84
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v85
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v3, v83
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s5
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB107_3
; GFX11-TRUE16-NEXT: .LBB107_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v68
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v67
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v30
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v65
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v22
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v16
; GFX11-TRUE16-NEXT: s_add_i32 s28, s28, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v70, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v71, v5
-; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
; GFX11-TRUE16-NEXT: s_add_i32 s24, s24, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v66, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v26
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v28
; GFX11-TRUE16-NEXT: s_or_b32 s4, s5, s4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v29, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v24
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v27, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v25, v6
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v23, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v18
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v34
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v21, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v20
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
; GFX11-TRUE16-NEXT: s_and_b32 s5, s24, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s25, 8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v55, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v9
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v32
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v19, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x300, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v54, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v17, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v9
; GFX11-TRUE16-NEXT: s_add_i32 s26, s26, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v36
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 0x300, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v53, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v33
; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s5
; GFX11-TRUE16-NEXT: s_and_b32 s6, s26, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s27, 8
@@ -82100,11 +83945,6 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
; GFX11-TRUE16-NEXT: s_add_i32 s22, s22, 3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v31
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v37
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v7
; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s7
; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
@@ -82122,83 +83962,133 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 8
; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 8
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v52, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v20
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v38
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v51, v4
; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s10
; GFX11-TRUE16-NEXT: s_or_b32 s0, s1, s0
; GFX11-TRUE16-NEXT: s_or_b32 s1, s3, s2
-; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
-; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s9, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s0, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s1, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s10, 0x300
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v50, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v49, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v20
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v35
+; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v64
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(5)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v82
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v80
+; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s8, 0x300
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v64
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v69
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v80
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v68
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v67
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v84, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v85, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v28
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v83, v2
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 0x300, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v30
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v22
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v69
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x300, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v70, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v65
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_add_nc_u32 v13, 0x300, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v66, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v26
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v48, v8
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v39, v4
-; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v85, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v71, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x300, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v29, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v24
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v27, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v25, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v23, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v18
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v21, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v20
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v34
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x300, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v55, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v32
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v19, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v54, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v17, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v36
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v53, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v33
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 3, v31
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v37
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v52, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v19
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xff, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v38
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v51, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v50, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v49, v19
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xff, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 3, v35
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v24, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v19
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v48, v20
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xff, v23
; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v81, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v8, 0xffff, s4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v24.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v39, v19
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v4, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v7, 16, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v20
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v21
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v9, 16, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v23, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v22, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v19, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v10, 16, v20
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v18, 16, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v14, 16, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v13, 16, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v3, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v20.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_add_nc_u32 v19, 0x300, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v17.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v16.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v18.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v19.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v3.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s3
; GFX11-TRUE16-NEXT: .LBB107_3: ; %end
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -92186,170 +94076,106 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s17, 8
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s21, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s23, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s27, 8
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v35
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
; GFX11-TRUE16-NEXT: s_and_b32 s10, s28, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s29, 8
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v39
-; GFX11-TRUE16-NEXT: s_or_b32 s10, s10, s11
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v1, 0xffff, s10
-; GFX11-TRUE16-NEXT: s_lshl_b32 s12, s27, 8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v38
-; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v31
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s9 :: v_dual_and_b32 v1, 0xff, v38
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v39
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v0, 16, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v31
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v48
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(5)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xff, v82
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v33
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v48
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v36
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v2, v49
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v32
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v51
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v5, v50
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v6, v52
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v53
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xff, v24
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xff, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v3, 16, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v34
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v7, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v18
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v8, 16, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v54
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v20
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v22
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v55
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xff, v67
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v19
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v8, v23
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v2, 16, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v9, v21
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v26
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v3, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v30
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xff, v80
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v82
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v2, v49
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v50
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v36
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v0, v51
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v32
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v52
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v34
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v0, v53
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v20
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v17
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v18
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v3, v54
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v19
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v24
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v1, v55
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v22
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v28
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v27
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v1, 16, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v2, v21
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v26
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v3, v23
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v65
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v66
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v11, v70
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v12, v71
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xff, v69
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v13, v83
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v86, 0xff, v64
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v14, v84
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v65
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v25
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v29
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v87, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v96, v12, v81
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v97, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v86, v86, v85
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v98, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v0, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v1, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v15, 16, v87
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v96, 16, v97
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v86, 16, v98
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v30
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v2, v27
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v29
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v68
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v1, v66
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v67
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v69
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v0, v70
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v64
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v80
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v71
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v81
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v15, v84
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v85
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v3, v83
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s5
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB111_3
; GFX11-TRUE16-NEXT: .LBB111_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v68
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v67
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v30
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v65
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v22
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v16
; GFX11-TRUE16-NEXT: s_add_i32 s28, s28, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v70, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v71, v5
-; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
; GFX11-TRUE16-NEXT: s_add_i32 s24, s24, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v66, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v26
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v28
; GFX11-TRUE16-NEXT: s_or_b32 s4, s5, s4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v29, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v24
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v27, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v25, v6
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v23, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v18
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v34
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v21, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v20
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
; GFX11-TRUE16-NEXT: s_and_b32 s5, s24, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s25, 8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v55, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v9
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v32
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v19, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x300, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v54, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v17, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v9
; GFX11-TRUE16-NEXT: s_add_i32 s26, s26, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v36
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 0x300, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v53, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v33
; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s5
; GFX11-TRUE16-NEXT: s_and_b32 s6, s26, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s27, 8
@@ -92358,11 +94184,6 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
; GFX11-TRUE16-NEXT: s_add_i32 s22, s22, 3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v31
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v37
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v7
; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s7
; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
@@ -92380,83 +94201,133 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 8
; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 8
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v52, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v20
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v38
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v51, v4
; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s10
; GFX11-TRUE16-NEXT: s_or_b32 s0, s1, s0
; GFX11-TRUE16-NEXT: s_or_b32 s1, s3, s2
-; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
-; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s9, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s0, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s1, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s10, 0x300
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v50, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v49, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v20
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v35
+; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v64
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(5)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v82
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v80
+; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s8, 0x300
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v64
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v69
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v80
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v68
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v67
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v84, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v85, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v28
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v83, v2
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 0x300, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v30
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v22
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v69
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x300, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v70, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v65
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_add_nc_u32 v13, 0x300, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v66, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v26
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v48, v8
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v39, v4
-; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v85, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v71, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x300, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v29, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v24
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v27, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v25, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v23, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v18
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v21, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v20
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v34
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x300, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v55, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v32
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v19, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v54, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v17, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v36
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v53, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v33
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 3, v31
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v37
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v52, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v19
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xff, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v38
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v51, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v50, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v49, v19
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xff, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 3, v35
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v24, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v19
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v48, v20
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xff, v23
; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v81, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v8, 0xffff, s4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v24.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v39, v19
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v4, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v7, 16, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v20
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v21
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v9, 16, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v23, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v22, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v19, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v10, 16, v20
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v18, 16, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v14, 16, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v13, 16, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v3, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v20.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_add_nc_u32 v19, 0x300, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v17.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v16.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v18.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v19.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v3.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s3
; GFX11-TRUE16-NEXT: .LBB111_3: ; %end
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll
index a1c0a87..5d4df4b 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll
@@ -10227,149 +10227,285 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr22
; GFX9-NEXT: s_branch .LBB29_2
;
-; GFX11-LABEL: bitcast_v18f32_to_v36i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_3
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s5, s29, 16
-; GFX11-NEXT: s_lshr_b32 s6, s28, 16
-; GFX11-NEXT: s_lshr_b32 s7, s27, 16
-; GFX11-NEXT: s_lshr_b32 s8, s26, 16
-; GFX11-NEXT: s_lshr_b32 s9, s25, 16
-; GFX11-NEXT: s_lshr_b32 s10, s24, 16
-; GFX11-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-NEXT: s_lshr_b32 s12, s22, 16
-; GFX11-NEXT: s_lshr_b32 s13, s21, 16
-; GFX11-NEXT: s_lshr_b32 s14, s20, 16
-; GFX11-NEXT: s_lshr_b32 s15, s19, 16
-; GFX11-NEXT: s_lshr_b32 s40, s18, 16
-; GFX11-NEXT: s_lshr_b32 s41, s17, 16
-; GFX11-NEXT: s_lshr_b32 s42, s16, 16
-; GFX11-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_4
-; GFX11-NEXT: .LBB29_2: ; %cmp.true
-; GFX11-NEXT: v_add_f32_e64 v13, s29, 1.0
-; GFX11-NEXT: v_add_f32_e64 v14, s28, 1.0
-; GFX11-NEXT: v_add_f32_e64 v15, s27, 1.0
-; GFX11-NEXT: v_add_f32_e64 v16, s26, 1.0
-; GFX11-NEXT: v_add_f32_e64 v17, s25, 1.0
-; GFX11-NEXT: v_add_f32_e64 v8, s24, 1.0
-; GFX11-NEXT: v_add_f32_e64 v9, s23, 1.0
-; GFX11-NEXT: v_add_f32_e64 v10, s22, 1.0
-; GFX11-NEXT: v_add_f32_e64 v11, s21, 1.0
-; GFX11-NEXT: v_add_f32_e64 v12, s20, 1.0
-; GFX11-NEXT: v_add_f32_e64 v3, s19, 1.0
-; GFX11-NEXT: v_add_f32_e64 v4, s18, 1.0
-; GFX11-NEXT: v_add_f32_e64 v5, s17, 1.0
-; GFX11-NEXT: v_add_f32_e64 v6, s16, 1.0
-; GFX11-NEXT: v_add_f32_e64 v7, s3, 1.0
-; GFX11-NEXT: v_add_f32_e64 v0, s2, 1.0
-; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
-; GFX11-NEXT: v_add_f32_e64 v2, s0, 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v2
-; GFX11-NEXT: s_branch .LBB29_5
-; GFX11-NEXT: .LBB29_3:
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: s_branch .LBB29_2
-; GFX11-NEXT: .LBB29_4:
-; GFX11-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v7, s3
-; GFX11-NEXT: v_dual_mov_b32 v6, s16 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s18 :: v_dual_mov_b32 v3, s19
-; GFX11-NEXT: v_dual_mov_b32 v12, s20 :: v_dual_mov_b32 v11, s21
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v9, s23
-; GFX11-NEXT: v_dual_mov_b32 v8, s24 :: v_dual_mov_b32 v17, s25
-; GFX11-NEXT: v_dual_mov_b32 v16, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s28 :: v_dual_mov_b32 v13, s29
-; GFX11-NEXT: v_dual_mov_b32 v35, s46 :: v_dual_mov_b32 v34, s45
-; GFX11-NEXT: v_dual_mov_b32 v33, s44 :: v_dual_mov_b32 v32, s43
-; GFX11-NEXT: v_dual_mov_b32 v31, s42 :: v_dual_mov_b32 v30, s41
-; GFX11-NEXT: v_dual_mov_b32 v29, s40 :: v_dual_mov_b32 v28, s15
-; GFX11-NEXT: v_dual_mov_b32 v27, s14 :: v_dual_mov_b32 v26, s13
-; GFX11-NEXT: v_dual_mov_b32 v25, s12 :: v_dual_mov_b32 v24, s11
-; GFX11-NEXT: v_dual_mov_b32 v23, s10 :: v_dual_mov_b32 v22, s9
-; GFX11-NEXT: v_dual_mov_b32 v21, s8 :: v_dual_mov_b32 v20, s7
-; GFX11-NEXT: v_dual_mov_b32 v19, s6 :: v_dual_mov_b32 v18, s5
-; GFX11-NEXT: .LBB29_5: ; %end
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v0, v35, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v1, v34, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v2, v33, 16, v36
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v3, v32, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v4, v31, 16, v6
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_lshl_or_b32 v6, v29, 16, v33
-; GFX11-NEXT: v_lshl_or_b32 v7, v28, 16, v34
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v29, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_lshl_or_b32 v8, v27, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v9, v26, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v11, v24, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v12, v23, 16, v29
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v5, v30, 16, v5
-; GFX11-NEXT: v_lshl_or_b32 v10, v25, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v13, v22, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v14, v21, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v15, v20, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v16, v19, 16, v23
-; GFX11-NEXT: v_lshl_or_b32 v17, v18, 16, v24
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v18f32_to_v36i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-TRUE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, s29, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, s28, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v15, s27, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v14, s26, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, s25, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, s24, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v11, s23, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, s22, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, s21, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, s20, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, s19, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, s18, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, s17, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, s16, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, s3, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, s2, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, s1, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, s0, 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-TRUE16-NEXT: s_branch .LBB29_5
+; GFX11-TRUE16-NEXT: .LBB29_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr7
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr5
+; GFX11-TRUE16-NEXT: s_branch .LBB29_2
+; GFX11-TRUE16-NEXT: .LBB29_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, s46 :: v_dual_mov_b32 v34, s45
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, s44 :: v_dual_mov_b32 v32, s43
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, s42 :: v_dual_mov_b32 v30, s41
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, s40 :: v_dual_mov_b32 v28, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, s14 :: v_dual_mov_b32 v26, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, s12 :: v_dual_mov_b32 v24, s11
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, s10 :: v_dual_mov_b32 v22, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, s8 :: v_dual_mov_b32 v20, s7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, s6 :: v_dual_mov_b32 v18, s5
+; GFX11-TRUE16-NEXT: .LBB29_5: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v21 :: v_dual_mov_b32 v20, v20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v19 :: v_dual_mov_b32 v18, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v20.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v19.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v18.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v18f32_to_v36i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-FAKE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, s29, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, s28, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v15, s27, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, s26, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, s25, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, s24, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, s23, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, s22, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, s21, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, s20, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, s19, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, s18, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, s17, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, s16, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, s3, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, s2, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, s1, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, s0, 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v2
+; GFX11-FAKE16-NEXT: s_branch .LBB29_5
+; GFX11-FAKE16-NEXT: .LBB29_3:
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr7
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr5
+; GFX11-FAKE16-NEXT: s_branch .LBB29_2
+; GFX11-FAKE16-NEXT: .LBB29_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v7, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s18 :: v_dual_mov_b32 v3, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s20 :: v_dual_mov_b32 v11, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v9, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s24 :: v_dual_mov_b32 v17, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s28 :: v_dual_mov_b32 v13, s29
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v35, s46 :: v_dual_mov_b32 v34, s45
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v33, s44 :: v_dual_mov_b32 v32, s43
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v31, s42 :: v_dual_mov_b32 v30, s41
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v29, s40 :: v_dual_mov_b32 v28, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, s14 :: v_dual_mov_b32 v26, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, s12 :: v_dual_mov_b32 v24, s11
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v23, s10 :: v_dual_mov_b32 v22, s9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v21, s8 :: v_dual_mov_b32 v20, s7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v19, s6 :: v_dual_mov_b32 v18, s5
+; GFX11-FAKE16-NEXT: .LBB29_5: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v35, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v33, 16, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v31, 16, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v29, 16, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v28, 16, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v29, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v26, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v24, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v23, 16, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v21, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v19, 16, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v18, 16, v24
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -12999,149 +13135,285 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr22
; GFX9-NEXT: s_branch .LBB33_2
;
-; GFX11-LABEL: bitcast_v18f32_to_v36f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_3
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s5, s29, 16
-; GFX11-NEXT: s_lshr_b32 s6, s28, 16
-; GFX11-NEXT: s_lshr_b32 s7, s27, 16
-; GFX11-NEXT: s_lshr_b32 s8, s26, 16
-; GFX11-NEXT: s_lshr_b32 s9, s25, 16
-; GFX11-NEXT: s_lshr_b32 s10, s24, 16
-; GFX11-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-NEXT: s_lshr_b32 s12, s22, 16
-; GFX11-NEXT: s_lshr_b32 s13, s21, 16
-; GFX11-NEXT: s_lshr_b32 s14, s20, 16
-; GFX11-NEXT: s_lshr_b32 s15, s19, 16
-; GFX11-NEXT: s_lshr_b32 s40, s18, 16
-; GFX11-NEXT: s_lshr_b32 s41, s17, 16
-; GFX11-NEXT: s_lshr_b32 s42, s16, 16
-; GFX11-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_4
-; GFX11-NEXT: .LBB33_2: ; %cmp.true
-; GFX11-NEXT: v_add_f32_e64 v13, s29, 1.0
-; GFX11-NEXT: v_add_f32_e64 v14, s28, 1.0
-; GFX11-NEXT: v_add_f32_e64 v15, s27, 1.0
-; GFX11-NEXT: v_add_f32_e64 v16, s26, 1.0
-; GFX11-NEXT: v_add_f32_e64 v17, s25, 1.0
-; GFX11-NEXT: v_add_f32_e64 v8, s24, 1.0
-; GFX11-NEXT: v_add_f32_e64 v9, s23, 1.0
-; GFX11-NEXT: v_add_f32_e64 v10, s22, 1.0
-; GFX11-NEXT: v_add_f32_e64 v11, s21, 1.0
-; GFX11-NEXT: v_add_f32_e64 v12, s20, 1.0
-; GFX11-NEXT: v_add_f32_e64 v3, s19, 1.0
-; GFX11-NEXT: v_add_f32_e64 v4, s18, 1.0
-; GFX11-NEXT: v_add_f32_e64 v5, s17, 1.0
-; GFX11-NEXT: v_add_f32_e64 v6, s16, 1.0
-; GFX11-NEXT: v_add_f32_e64 v7, s3, 1.0
-; GFX11-NEXT: v_add_f32_e64 v0, s2, 1.0
-; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
-; GFX11-NEXT: v_add_f32_e64 v2, s0, 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v2
-; GFX11-NEXT: s_branch .LBB33_5
-; GFX11-NEXT: .LBB33_3:
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: s_branch .LBB33_2
-; GFX11-NEXT: .LBB33_4:
-; GFX11-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v7, s3
-; GFX11-NEXT: v_dual_mov_b32 v6, s16 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s18 :: v_dual_mov_b32 v3, s19
-; GFX11-NEXT: v_dual_mov_b32 v12, s20 :: v_dual_mov_b32 v11, s21
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v9, s23
-; GFX11-NEXT: v_dual_mov_b32 v8, s24 :: v_dual_mov_b32 v17, s25
-; GFX11-NEXT: v_dual_mov_b32 v16, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s28 :: v_dual_mov_b32 v13, s29
-; GFX11-NEXT: v_dual_mov_b32 v35, s46 :: v_dual_mov_b32 v34, s45
-; GFX11-NEXT: v_dual_mov_b32 v33, s44 :: v_dual_mov_b32 v32, s43
-; GFX11-NEXT: v_dual_mov_b32 v31, s42 :: v_dual_mov_b32 v30, s41
-; GFX11-NEXT: v_dual_mov_b32 v29, s40 :: v_dual_mov_b32 v28, s15
-; GFX11-NEXT: v_dual_mov_b32 v27, s14 :: v_dual_mov_b32 v26, s13
-; GFX11-NEXT: v_dual_mov_b32 v25, s12 :: v_dual_mov_b32 v24, s11
-; GFX11-NEXT: v_dual_mov_b32 v23, s10 :: v_dual_mov_b32 v22, s9
-; GFX11-NEXT: v_dual_mov_b32 v21, s8 :: v_dual_mov_b32 v20, s7
-; GFX11-NEXT: v_dual_mov_b32 v19, s6 :: v_dual_mov_b32 v18, s5
-; GFX11-NEXT: .LBB33_5: ; %end
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v0, v35, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v1, v34, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v2, v33, 16, v36
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v3, v32, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v4, v31, 16, v6
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_lshl_or_b32 v6, v29, 16, v33
-; GFX11-NEXT: v_lshl_or_b32 v7, v28, 16, v34
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v29, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_lshl_or_b32 v8, v27, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v9, v26, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v11, v24, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v12, v23, 16, v29
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v5, v30, 16, v5
-; GFX11-NEXT: v_lshl_or_b32 v10, v25, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v13, v22, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v14, v21, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v15, v20, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v16, v19, 16, v23
-; GFX11-NEXT: v_lshl_or_b32 v17, v18, 16, v24
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v18f32_to_v36f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-TRUE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, s29, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, s28, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v15, s27, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v14, s26, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, s25, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, s24, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v11, s23, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, s22, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, s21, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, s20, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, s19, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, s18, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, s17, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, s16, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, s3, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, s2, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, s1, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, s0, 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-TRUE16-NEXT: s_branch .LBB33_5
+; GFX11-TRUE16-NEXT: .LBB33_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr7
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr5
+; GFX11-TRUE16-NEXT: s_branch .LBB33_2
+; GFX11-TRUE16-NEXT: .LBB33_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, s46 :: v_dual_mov_b32 v34, s45
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, s44 :: v_dual_mov_b32 v32, s43
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, s42 :: v_dual_mov_b32 v30, s41
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, s40 :: v_dual_mov_b32 v28, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, s14 :: v_dual_mov_b32 v26, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, s12 :: v_dual_mov_b32 v24, s11
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, s10 :: v_dual_mov_b32 v22, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, s8 :: v_dual_mov_b32 v20, s7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, s6 :: v_dual_mov_b32 v18, s5
+; GFX11-TRUE16-NEXT: .LBB33_5: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v21 :: v_dual_mov_b32 v20, v20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v19 :: v_dual_mov_b32 v18, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v20.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v19.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v18.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v18f32_to_v36f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-FAKE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, s29, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, s28, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v15, s27, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, s26, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, s25, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, s24, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, s23, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, s22, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, s21, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, s20, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, s19, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, s18, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, s17, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, s16, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, s3, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, s2, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, s1, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, s0, 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v2
+; GFX11-FAKE16-NEXT: s_branch .LBB33_5
+; GFX11-FAKE16-NEXT: .LBB33_3:
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr7
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr5
+; GFX11-FAKE16-NEXT: s_branch .LBB33_2
+; GFX11-FAKE16-NEXT: .LBB33_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v7, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s18 :: v_dual_mov_b32 v3, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s20 :: v_dual_mov_b32 v11, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v9, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s24 :: v_dual_mov_b32 v17, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s28 :: v_dual_mov_b32 v13, s29
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v35, s46 :: v_dual_mov_b32 v34, s45
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v33, s44 :: v_dual_mov_b32 v32, s43
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v31, s42 :: v_dual_mov_b32 v30, s41
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v29, s40 :: v_dual_mov_b32 v28, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, s14 :: v_dual_mov_b32 v26, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, s12 :: v_dual_mov_b32 v24, s11
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v23, s10 :: v_dual_mov_b32 v22, s9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v21, s8 :: v_dual_mov_b32 v20, s7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v19, s6 :: v_dual_mov_b32 v18, s5
+; GFX11-FAKE16-NEXT: .LBB33_5: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v35, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v33, 16, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v31, 16, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v29, 16, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v28, 16, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v29, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v26, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v24, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v23, 16, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v21, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v19, 16, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v18, 16, v24
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -21895,140 +22167,270 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; GFX9-NEXT: ; implicit-def: $vgpr22
; GFX9-NEXT: s_branch .LBB49_2
;
-; GFX11-LABEL: bitcast_v9f64_to_v36i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_3
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s5, s29, 16
-; GFX11-NEXT: s_lshr_b32 s14, s28, 16
-; GFX11-NEXT: s_lshr_b32 s6, s27, 16
-; GFX11-NEXT: s_lshr_b32 s15, s26, 16
-; GFX11-NEXT: s_lshr_b32 s7, s25, 16
-; GFX11-NEXT: s_lshr_b32 s40, s24, 16
-; GFX11-NEXT: s_lshr_b32 s8, s23, 16
-; GFX11-NEXT: s_lshr_b32 s41, s22, 16
-; GFX11-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-NEXT: s_lshr_b32 s42, s20, 16
-; GFX11-NEXT: s_lshr_b32 s10, s19, 16
-; GFX11-NEXT: s_lshr_b32 s43, s18, 16
-; GFX11-NEXT: s_lshr_b32 s11, s17, 16
-; GFX11-NEXT: s_lshr_b32 s44, s16, 16
-; GFX11-NEXT: s_lshr_b32 s12, s3, 16
-; GFX11-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-NEXT: s_lshr_b32 s13, s1, 16
-; GFX11-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_4
-; GFX11-NEXT: .LBB49_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[13:14], s[28:29], 1.0
-; GFX11-NEXT: v_add_f64 v[15:16], s[26:27], 1.0
-; GFX11-NEXT: v_add_f64 v[17:18], s[24:25], 1.0
-; GFX11-NEXT: v_add_f64 v[8:9], s[22:23], 1.0
-; GFX11-NEXT: v_add_f64 v[10:11], s[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[3:4], s[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[5:6], s[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[19:20], s[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v0
-; GFX11-NEXT: s_branch .LBB49_5
-; GFX11-NEXT: .LBB49_3:
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: s_branch .LBB49_2
-; GFX11-NEXT: .LBB49_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v19, s2
-; GFX11-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v10, s20
-; GFX11-NEXT: v_dual_mov_b32 v3, s18 :: v_dual_mov_b32 v8, s22
-; GFX11-NEXT: v_dual_mov_b32 v17, s24 :: v_dual_mov_b32 v20, s3
-; GFX11-NEXT: v_dual_mov_b32 v15, s26 :: v_dual_mov_b32 v6, s17
-; GFX11-NEXT: v_dual_mov_b32 v13, s28 :: v_dual_mov_b32 v4, s19
-; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v18, s25
-; GFX11-NEXT: v_dual_mov_b32 v11, s21 :: v_dual_mov_b32 v16, s27
-; GFX11-NEXT: v_dual_mov_b32 v9, s23 :: v_dual_mov_b32 v14, s29
-; GFX11-NEXT: v_dual_mov_b32 v34, s46 :: v_dual_mov_b32 v7, s43
-; GFX11-NEXT: v_dual_mov_b32 v2, s45 :: v_dual_mov_b32 v27, s42
-; GFX11-NEXT: v_dual_mov_b32 v30, s44 :: v_dual_mov_b32 v21, s14
-; GFX11-NEXT: v_dual_mov_b32 v26, s41 :: v_dual_mov_b32 v35, s13
-; GFX11-NEXT: v_dual_mov_b32 v12, s40 :: v_dual_mov_b32 v33, s12
-; GFX11-NEXT: v_dual_mov_b32 v22, s15 :: v_dual_mov_b32 v31, s10
-; GFX11-NEXT: v_dual_mov_b32 v32, s11 :: v_dual_mov_b32 v29, s9
-; GFX11-NEXT: v_dual_mov_b32 v28, s8 :: v_dual_mov_b32 v25, s7
-; GFX11-NEXT: v_dual_mov_b32 v24, s6 :: v_dual_mov_b32 v23, s5
-; GFX11-NEXT: .LBB49_5: ; %end
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_lshl_or_b32 v2, v2, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v20
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v0, v34, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v3, v33, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v30, 16, v5
-; GFX11-NEXT: v_lshl_or_b32 v5, v32, 16, v6
-; GFX11-NEXT: v_lshl_or_b32 v6, v7, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v9
-; GFX11-NEXT: v_lshl_or_b32 v8, v27, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v10, v26, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v12, v12, 16, v17
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v14
-; GFX11-NEXT: v_lshl_or_b32 v1, v35, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v7, v31, 16, v34
-; GFX11-NEXT: v_lshl_or_b32 v9, v29, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v11, v28, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v13, v25, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v14, v22, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v15, v24, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v16, v21, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v17, v23, 16, v19
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v9f64_to_v36i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-TRUE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], s[28:29], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], s[26:27], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], s[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], s[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v0
+; GFX11-TRUE16-NEXT: s_branch .LBB49_5
+; GFX11-TRUE16-NEXT: .LBB49_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr7
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr5
+; GFX11-TRUE16-NEXT: s_branch .LBB49_2
+; GFX11-TRUE16-NEXT: .LBB49_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s46 :: v_dual_mov_b32 v35, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s45 :: v_dual_mov_b32 v33, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s44 :: v_dual_mov_b32 v31, s11
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s43 :: v_dual_mov_b32 v29, s10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s42 :: v_dual_mov_b32 v27, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, s41 :: v_dual_mov_b32 v25, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, s40 :: v_dual_mov_b32 v23, s7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s15 :: v_dual_mov_b32 v21, s6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, s14 :: v_dual_mov_b32 v19, s5
+; GFX11-TRUE16-NEXT: .LBB49_5: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, v32 :: v_dual_mov_b32 v31, v31
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v22, v22
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, v34 :: v_dual_mov_b32 v35, v35
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v32.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, v33 :: v_dual_mov_b32 v27, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v30 :: v_dual_mov_b32 v29, v29
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v28 :: v_dual_mov_b32 v25, v25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v26 :: v_dual_mov_b32 v21, v21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v24 :: v_dual_mov_b32 v19, v19
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v22, v23
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v20, v20
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v18, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v20.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v18.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v19.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v9f64_to_v36i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-FAKE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[13:14], s[28:29], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[15:16], s[26:27], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[17:18], s[24:25], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[8:9], s[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[10:11], s[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[3:4], s[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[5:6], s[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[19:20], s[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v0
+; GFX11-FAKE16-NEXT: s_branch .LBB49_5
+; GFX11-FAKE16-NEXT: .LBB49_3:
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr7
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr5
+; GFX11-FAKE16-NEXT: s_branch .LBB49_2
+; GFX11-FAKE16-NEXT: .LBB49_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v19, s2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v10, s20
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s18 :: v_dual_mov_b32 v8, s22
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s24 :: v_dual_mov_b32 v20, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s26 :: v_dual_mov_b32 v6, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v13, s28 :: v_dual_mov_b32 v4, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v18, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, s21 :: v_dual_mov_b32 v16, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v9, s23 :: v_dual_mov_b32 v14, s29
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s46 :: v_dual_mov_b32 v7, s43
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s45 :: v_dual_mov_b32 v27, s42
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s44 :: v_dual_mov_b32 v21, s14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s41 :: v_dual_mov_b32 v35, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s40 :: v_dual_mov_b32 v33, s12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s15 :: v_dual_mov_b32 v31, s10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s11 :: v_dual_mov_b32 v29, s9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s8 :: v_dual_mov_b32 v25, s7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s6 :: v_dual_mov_b32 v23, s5
+; GFX11-FAKE16-NEXT: .LBB49_5: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v2, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v34, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v33, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v32, 16, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v7, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v27, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v26, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v12, 16, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v35, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v31, 16, v34
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v29, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v25, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v22, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v24, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v21, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v23, 16, v19
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -24595,140 +24997,270 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr22
; GFX9-NEXT: s_branch .LBB53_2
;
-; GFX11-LABEL: bitcast_v9f64_to_v36f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_3
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s5, s29, 16
-; GFX11-NEXT: s_lshr_b32 s14, s28, 16
-; GFX11-NEXT: s_lshr_b32 s6, s27, 16
-; GFX11-NEXT: s_lshr_b32 s15, s26, 16
-; GFX11-NEXT: s_lshr_b32 s7, s25, 16
-; GFX11-NEXT: s_lshr_b32 s40, s24, 16
-; GFX11-NEXT: s_lshr_b32 s8, s23, 16
-; GFX11-NEXT: s_lshr_b32 s41, s22, 16
-; GFX11-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-NEXT: s_lshr_b32 s42, s20, 16
-; GFX11-NEXT: s_lshr_b32 s10, s19, 16
-; GFX11-NEXT: s_lshr_b32 s43, s18, 16
-; GFX11-NEXT: s_lshr_b32 s11, s17, 16
-; GFX11-NEXT: s_lshr_b32 s44, s16, 16
-; GFX11-NEXT: s_lshr_b32 s12, s3, 16
-; GFX11-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-NEXT: s_lshr_b32 s13, s1, 16
-; GFX11-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_4
-; GFX11-NEXT: .LBB53_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[13:14], s[28:29], 1.0
-; GFX11-NEXT: v_add_f64 v[15:16], s[26:27], 1.0
-; GFX11-NEXT: v_add_f64 v[17:18], s[24:25], 1.0
-; GFX11-NEXT: v_add_f64 v[8:9], s[22:23], 1.0
-; GFX11-NEXT: v_add_f64 v[10:11], s[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[3:4], s[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[5:6], s[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[19:20], s[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v0
-; GFX11-NEXT: s_branch .LBB53_5
-; GFX11-NEXT: .LBB53_3:
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: s_branch .LBB53_2
-; GFX11-NEXT: .LBB53_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v19, s2
-; GFX11-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v10, s20
-; GFX11-NEXT: v_dual_mov_b32 v3, s18 :: v_dual_mov_b32 v8, s22
-; GFX11-NEXT: v_dual_mov_b32 v17, s24 :: v_dual_mov_b32 v20, s3
-; GFX11-NEXT: v_dual_mov_b32 v15, s26 :: v_dual_mov_b32 v6, s17
-; GFX11-NEXT: v_dual_mov_b32 v13, s28 :: v_dual_mov_b32 v4, s19
-; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v18, s25
-; GFX11-NEXT: v_dual_mov_b32 v11, s21 :: v_dual_mov_b32 v16, s27
-; GFX11-NEXT: v_dual_mov_b32 v9, s23 :: v_dual_mov_b32 v14, s29
-; GFX11-NEXT: v_dual_mov_b32 v34, s46 :: v_dual_mov_b32 v7, s43
-; GFX11-NEXT: v_dual_mov_b32 v2, s45 :: v_dual_mov_b32 v27, s42
-; GFX11-NEXT: v_dual_mov_b32 v30, s44 :: v_dual_mov_b32 v21, s14
-; GFX11-NEXT: v_dual_mov_b32 v26, s41 :: v_dual_mov_b32 v35, s13
-; GFX11-NEXT: v_dual_mov_b32 v12, s40 :: v_dual_mov_b32 v33, s12
-; GFX11-NEXT: v_dual_mov_b32 v22, s15 :: v_dual_mov_b32 v31, s10
-; GFX11-NEXT: v_dual_mov_b32 v32, s11 :: v_dual_mov_b32 v29, s9
-; GFX11-NEXT: v_dual_mov_b32 v28, s8 :: v_dual_mov_b32 v25, s7
-; GFX11-NEXT: v_dual_mov_b32 v24, s6 :: v_dual_mov_b32 v23, s5
-; GFX11-NEXT: .LBB53_5: ; %end
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_lshl_or_b32 v2, v2, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v20
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v0, v34, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v3, v33, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v30, 16, v5
-; GFX11-NEXT: v_lshl_or_b32 v5, v32, 16, v6
-; GFX11-NEXT: v_lshl_or_b32 v6, v7, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v9
-; GFX11-NEXT: v_lshl_or_b32 v8, v27, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v10, v26, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v12, v12, 16, v17
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v14
-; GFX11-NEXT: v_lshl_or_b32 v1, v35, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v7, v31, 16, v34
-; GFX11-NEXT: v_lshl_or_b32 v9, v29, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v11, v28, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v13, v25, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v14, v22, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v15, v24, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v16, v21, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v17, v23, 16, v19
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v9f64_to_v36f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-TRUE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], s[28:29], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], s[26:27], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], s[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], s[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v0
+; GFX11-TRUE16-NEXT: s_branch .LBB53_5
+; GFX11-TRUE16-NEXT: .LBB53_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr7
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr5
+; GFX11-TRUE16-NEXT: s_branch .LBB53_2
+; GFX11-TRUE16-NEXT: .LBB53_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s46 :: v_dual_mov_b32 v35, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s45 :: v_dual_mov_b32 v33, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s44 :: v_dual_mov_b32 v31, s11
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s43 :: v_dual_mov_b32 v29, s10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s42 :: v_dual_mov_b32 v27, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, s41 :: v_dual_mov_b32 v25, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, s40 :: v_dual_mov_b32 v23, s7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s15 :: v_dual_mov_b32 v21, s6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, s14 :: v_dual_mov_b32 v19, s5
+; GFX11-TRUE16-NEXT: .LBB53_5: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, v32 :: v_dual_mov_b32 v31, v31
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v22, v22
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, v34 :: v_dual_mov_b32 v35, v35
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v32.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, v33 :: v_dual_mov_b32 v27, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v30 :: v_dual_mov_b32 v29, v29
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v28 :: v_dual_mov_b32 v25, v25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v26 :: v_dual_mov_b32 v21, v21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v24 :: v_dual_mov_b32 v19, v19
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v22, v23
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v20, v20
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v18, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v20.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v18.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v19.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v9f64_to_v36f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-FAKE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[13:14], s[28:29], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[15:16], s[26:27], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[17:18], s[24:25], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[8:9], s[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[10:11], s[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[3:4], s[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[5:6], s[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[19:20], s[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v0
+; GFX11-FAKE16-NEXT: s_branch .LBB53_5
+; GFX11-FAKE16-NEXT: .LBB53_3:
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr7
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr5
+; GFX11-FAKE16-NEXT: s_branch .LBB53_2
+; GFX11-FAKE16-NEXT: .LBB53_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v19, s2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v10, s20
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s18 :: v_dual_mov_b32 v8, s22
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s24 :: v_dual_mov_b32 v20, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s26 :: v_dual_mov_b32 v6, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v13, s28 :: v_dual_mov_b32 v4, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v18, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, s21 :: v_dual_mov_b32 v16, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v9, s23 :: v_dual_mov_b32 v14, s29
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s46 :: v_dual_mov_b32 v7, s43
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s45 :: v_dual_mov_b32 v27, s42
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s44 :: v_dual_mov_b32 v21, s14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s41 :: v_dual_mov_b32 v35, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s40 :: v_dual_mov_b32 v33, s12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s15 :: v_dual_mov_b32 v31, s10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s11 :: v_dual_mov_b32 v29, s9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s8 :: v_dual_mov_b32 v25, s7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s6 :: v_dual_mov_b32 v23, s5
+; GFX11-FAKE16-NEXT: .LBB53_5: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v2, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v34, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v33, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v32, 16, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v7, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v27, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v26, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v12, 16, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v35, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v31, 16, v34
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v29, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v25, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v22, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v24, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v21, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v23, 16, v19
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -27654,149 +28186,285 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v3, v19
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v36i16_to_v36f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_lshr_b32 s45, s29, 16
-; GFX11-NEXT: s_lshr_b32 s44, s28, 16
-; GFX11-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-NEXT: s_lshr_b32 s42, s26, 16
-; GFX11-NEXT: s_lshr_b32 s41, s25, 16
-; GFX11-NEXT: s_lshr_b32 s40, s24, 16
-; GFX11-NEXT: s_lshr_b32 s15, s23, 16
-; GFX11-NEXT: s_lshr_b32 s14, s22, 16
-; GFX11-NEXT: s_lshr_b32 s13, s21, 16
-; GFX11-NEXT: s_lshr_b32 s12, s20, 16
-; GFX11-NEXT: s_lshr_b32 s11, s19, 16
-; GFX11-NEXT: s_lshr_b32 s10, s18, 16
-; GFX11-NEXT: s_lshr_b32 s9, s17, 16
-; GFX11-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-NEXT: s_lshr_b32 s6, s3, 16
-; GFX11-NEXT: s_lshr_b32 s8, s2, 16
-; GFX11-NEXT: s_lshr_b32 s4, s1, 16
-; GFX11-NEXT: s_lshr_b32 s5, s0, 16
-; GFX11-NEXT: s_mov_b32 s46, 0
-; GFX11-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
-; GFX11-NEXT: s_cbranch_vccnz .LBB57_4
-; GFX11-NEXT: .LBB57_2: ; %cmp.true
-; GFX11-NEXT: s_pack_ll_b32_b16 s29, s29, s45
-; GFX11-NEXT: s_pack_ll_b32_b16 s28, s28, s44
-; GFX11-NEXT: s_pack_ll_b32_b16 s27, s27, s43
-; GFX11-NEXT: s_pack_ll_b32_b16 s26, s26, s42
-; GFX11-NEXT: s_pack_ll_b32_b16 s25, s25, s41
-; GFX11-NEXT: s_pack_ll_b32_b16 s24, s24, s40
-; GFX11-NEXT: s_pack_ll_b32_b16 s15, s23, s15
-; GFX11-NEXT: s_pack_ll_b32_b16 s14, s22, s14
-; GFX11-NEXT: s_pack_ll_b32_b16 s13, s21, s13
-; GFX11-NEXT: s_pack_ll_b32_b16 s12, s20, s12
-; GFX11-NEXT: s_pack_ll_b32_b16 s11, s19, s11
-; GFX11-NEXT: s_pack_ll_b32_b16 s10, s18, s10
-; GFX11-NEXT: s_pack_ll_b32_b16 s9, s17, s9
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-NEXT: s_pack_ll_b32_b16 s3, s3, s6
-; GFX11-NEXT: s_pack_ll_b32_b16 s2, s2, s8
-; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s5
-; GFX11-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-NEXT: v_pk_add_u16 v13, s29, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, s28, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, s26, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, s25, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, s24, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, s15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, s14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, s13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, s11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, s10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, s9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, s0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v0, s2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, s3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v13
-; GFX11-NEXT: s_branch .LBB57_5
-; GFX11-NEXT: .LBB57_3:
-; GFX11-NEXT: s_branch .LBB57_2
-; GFX11-NEXT: .LBB57_4:
-; GFX11-NEXT: v_dual_mov_b32 v13, s29 :: v_dual_mov_b32 v14, s28
-; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s26
-; GFX11-NEXT: v_dual_mov_b32 v17, s25 :: v_dual_mov_b32 v8, s24
-; GFX11-NEXT: v_dual_mov_b32 v9, s23 :: v_dual_mov_b32 v10, s22
-; GFX11-NEXT: v_dual_mov_b32 v11, s21 :: v_dual_mov_b32 v12, s20
-; GFX11-NEXT: v_dual_mov_b32 v3, s19 :: v_dual_mov_b32 v4, s18
-; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s16
-; GFX11-NEXT: v_dual_mov_b32 v7, s3 :: v_dual_mov_b32 v0, s2
-; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s0
-; GFX11-NEXT: v_dual_mov_b32 v18, s45 :: v_dual_mov_b32 v19, s44
-; GFX11-NEXT: v_dual_mov_b32 v20, s43 :: v_dual_mov_b32 v21, s42
-; GFX11-NEXT: v_dual_mov_b32 v22, s41 :: v_dual_mov_b32 v23, s40
-; GFX11-NEXT: v_dual_mov_b32 v24, s15 :: v_dual_mov_b32 v25, s14
-; GFX11-NEXT: v_dual_mov_b32 v26, s13 :: v_dual_mov_b32 v27, s12
-; GFX11-NEXT: v_dual_mov_b32 v28, s11 :: v_dual_mov_b32 v29, s10
-; GFX11-NEXT: v_dual_mov_b32 v30, s9 :: v_dual_mov_b32 v31, s7
-; GFX11-NEXT: v_dual_mov_b32 v32, s6 :: v_dual_mov_b32 v33, s8
-; GFX11-NEXT: v_dual_mov_b32 v34, s4 :: v_dual_mov_b32 v35, s5
-; GFX11-NEXT: .LBB57_5: ; %end
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v0, v35, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v1, v34, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v2, v33, 16, v36
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v3, v32, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v4, v31, 16, v6
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_lshl_or_b32 v6, v29, 16, v33
-; GFX11-NEXT: v_lshl_or_b32 v7, v28, 16, v34
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v29, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_lshl_or_b32 v8, v27, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v9, v26, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v11, v24, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v12, v23, 16, v29
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v5, v30, 16, v5
-; GFX11-NEXT: v_lshl_or_b32 v10, v25, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v13, v22, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v14, v21, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v15, v20, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v16, v19, 16, v23
-; GFX11-NEXT: v_lshl_or_b32 v17, v18, 16, v24
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v36i16_to_v36f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-TRUE16-NEXT: .LBB57_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s26, s26, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s25, s25, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s24, s24, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s23, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s22, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s21, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s20, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s19, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s18, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s17, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s29, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s28, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-TRUE16-NEXT: s_branch .LBB57_5
+; GFX11-TRUE16-NEXT: .LBB57_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB57_2
+; GFX11-TRUE16-NEXT: .LBB57_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, s45 :: v_dual_mov_b32 v19, s44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s43 :: v_dual_mov_b32 v21, s42
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, s41 :: v_dual_mov_b32 v23, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, s15 :: v_dual_mov_b32 v25, s14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s13 :: v_dual_mov_b32 v27, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s11 :: v_dual_mov_b32 v29, s10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s9 :: v_dual_mov_b32 v31, s7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s6 :: v_dual_mov_b32 v33, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s4 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: .LBB57_5: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v21 :: v_dual_mov_b32 v20, v20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v19 :: v_dual_mov_b32 v18, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v20.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v19.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v18.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v36i16_to_v36f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s0, 16
+; GFX11-FAKE16-NEXT: s_mov_b32 s46, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-FAKE16-NEXT: .LBB57_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s27, s27, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s26, s26, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s25, s25, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s24, s24, s40
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s23, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s22, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s21, s13
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s20, s12
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s19, s11
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s18, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s17, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, s29, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, s28, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s26, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s25, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, s24, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, s15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, s14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, s13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, s11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, s10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, s9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, s0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, s2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, s3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v13
+; GFX11-FAKE16-NEXT: s_branch .LBB57_5
+; GFX11-FAKE16-NEXT: .LBB57_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB57_2
+; GFX11-FAKE16-NEXT: .LBB57_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v13, s29 :: v_dual_mov_b32 v14, s28
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s26
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s25 :: v_dual_mov_b32 v8, s24
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v9, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, s21 :: v_dual_mov_b32 v12, s20
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s19 :: v_dual_mov_b32 v4, s18
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s16
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, s3 :: v_dual_mov_b32 v0, s2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s45 :: v_dual_mov_b32 v19, s44
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s43 :: v_dual_mov_b32 v21, s42
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s41 :: v_dual_mov_b32 v23, s40
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s15 :: v_dual_mov_b32 v25, s14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s13 :: v_dual_mov_b32 v27, s12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s11 :: v_dual_mov_b32 v29, s10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s9 :: v_dual_mov_b32 v31, s7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s6 :: v_dual_mov_b32 v33, s8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s4 :: v_dual_mov_b32 v35, s5
+; GFX11-FAKE16-NEXT: .LBB57_5: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v35, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v33, 16, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v31, 16, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v29, 16, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v28, 16, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v29, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v26, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v24, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v23, 16, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v21, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v19, 16, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v18, 16, v24
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -29137,149 +29805,285 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v3, v19
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v36f16_to_v36i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_lshr_b32 s45, s29, 16
-; GFX11-NEXT: s_lshr_b32 s44, s28, 16
-; GFX11-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-NEXT: s_lshr_b32 s42, s26, 16
-; GFX11-NEXT: s_lshr_b32 s41, s25, 16
-; GFX11-NEXT: s_lshr_b32 s40, s24, 16
-; GFX11-NEXT: s_lshr_b32 s15, s23, 16
-; GFX11-NEXT: s_lshr_b32 s14, s22, 16
-; GFX11-NEXT: s_lshr_b32 s13, s21, 16
-; GFX11-NEXT: s_lshr_b32 s12, s20, 16
-; GFX11-NEXT: s_lshr_b32 s11, s19, 16
-; GFX11-NEXT: s_lshr_b32 s10, s18, 16
-; GFX11-NEXT: s_lshr_b32 s9, s17, 16
-; GFX11-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-NEXT: s_lshr_b32 s6, s3, 16
-; GFX11-NEXT: s_lshr_b32 s8, s2, 16
-; GFX11-NEXT: s_lshr_b32 s4, s1, 16
-; GFX11-NEXT: s_lshr_b32 s5, s0, 16
-; GFX11-NEXT: s_mov_b32 s46, 0
-; GFX11-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
-; GFX11-NEXT: s_cbranch_vccnz .LBB59_4
-; GFX11-NEXT: .LBB59_2: ; %cmp.true
-; GFX11-NEXT: s_pack_ll_b32_b16 s29, s29, s45
-; GFX11-NEXT: s_pack_ll_b32_b16 s28, s28, s44
-; GFX11-NEXT: s_pack_ll_b32_b16 s27, s27, s43
-; GFX11-NEXT: s_pack_ll_b32_b16 s26, s26, s42
-; GFX11-NEXT: s_pack_ll_b32_b16 s25, s25, s41
-; GFX11-NEXT: s_pack_ll_b32_b16 s24, s24, s40
-; GFX11-NEXT: s_pack_ll_b32_b16 s15, s23, s15
-; GFX11-NEXT: s_pack_ll_b32_b16 s14, s22, s14
-; GFX11-NEXT: s_pack_ll_b32_b16 s13, s21, s13
-; GFX11-NEXT: s_pack_ll_b32_b16 s12, s20, s12
-; GFX11-NEXT: s_pack_ll_b32_b16 s11, s19, s11
-; GFX11-NEXT: s_pack_ll_b32_b16 s10, s18, s10
-; GFX11-NEXT: s_pack_ll_b32_b16 s9, s17, s9
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-NEXT: s_pack_ll_b32_b16 s3, s3, s6
-; GFX11-NEXT: s_pack_ll_b32_b16 s2, s2, s8
-; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s5
-; GFX11-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, s29 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s28 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, s26 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, s25 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, s24 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v13
-; GFX11-NEXT: s_branch .LBB59_5
-; GFX11-NEXT: .LBB59_3:
-; GFX11-NEXT: s_branch .LBB59_2
-; GFX11-NEXT: .LBB59_4:
-; GFX11-NEXT: v_dual_mov_b32 v13, s29 :: v_dual_mov_b32 v14, s28
-; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s26
-; GFX11-NEXT: v_dual_mov_b32 v17, s25 :: v_dual_mov_b32 v8, s24
-; GFX11-NEXT: v_dual_mov_b32 v9, s23 :: v_dual_mov_b32 v10, s22
-; GFX11-NEXT: v_dual_mov_b32 v11, s21 :: v_dual_mov_b32 v12, s20
-; GFX11-NEXT: v_dual_mov_b32 v3, s19 :: v_dual_mov_b32 v4, s18
-; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s16
-; GFX11-NEXT: v_dual_mov_b32 v7, s3 :: v_dual_mov_b32 v0, s2
-; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s0
-; GFX11-NEXT: v_dual_mov_b32 v18, s45 :: v_dual_mov_b32 v19, s44
-; GFX11-NEXT: v_dual_mov_b32 v20, s43 :: v_dual_mov_b32 v21, s42
-; GFX11-NEXT: v_dual_mov_b32 v22, s41 :: v_dual_mov_b32 v23, s40
-; GFX11-NEXT: v_dual_mov_b32 v24, s15 :: v_dual_mov_b32 v25, s14
-; GFX11-NEXT: v_dual_mov_b32 v26, s13 :: v_dual_mov_b32 v27, s12
-; GFX11-NEXT: v_dual_mov_b32 v28, s11 :: v_dual_mov_b32 v29, s10
-; GFX11-NEXT: v_dual_mov_b32 v30, s9 :: v_dual_mov_b32 v31, s7
-; GFX11-NEXT: v_dual_mov_b32 v32, s6 :: v_dual_mov_b32 v33, s8
-; GFX11-NEXT: v_dual_mov_b32 v34, s4 :: v_dual_mov_b32 v35, s5
-; GFX11-NEXT: .LBB59_5: ; %end
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v0, v35, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v1, v34, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v2, v33, 16, v36
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v3, v32, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v4, v31, 16, v6
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_lshl_or_b32 v6, v29, 16, v33
-; GFX11-NEXT: v_lshl_or_b32 v7, v28, 16, v34
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v29, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_lshl_or_b32 v8, v27, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v9, v26, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v11, v24, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v12, v23, 16, v29
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v5, v30, 16, v5
-; GFX11-NEXT: v_lshl_or_b32 v10, v25, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v13, v22, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v14, v21, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v15, v20, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v16, v19, 16, v23
-; GFX11-NEXT: v_lshl_or_b32 v17, v18, 16, v24
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v36f16_to_v36i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s26, s26, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s25, s25, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s24, s24, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s23, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s22, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s21, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s20, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s19, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s18, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s17, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s29 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s28 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-TRUE16-NEXT: s_branch .LBB59_5
+; GFX11-TRUE16-NEXT: .LBB59_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB59_2
+; GFX11-TRUE16-NEXT: .LBB59_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, s45 :: v_dual_mov_b32 v19, s44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s43 :: v_dual_mov_b32 v21, s42
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, s41 :: v_dual_mov_b32 v23, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, s15 :: v_dual_mov_b32 v25, s14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s13 :: v_dual_mov_b32 v27, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s11 :: v_dual_mov_b32 v29, s10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s9 :: v_dual_mov_b32 v31, s7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s6 :: v_dual_mov_b32 v33, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s4 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: .LBB59_5: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v21 :: v_dual_mov_b32 v20, v20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v19 :: v_dual_mov_b32 v18, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v20.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v19.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v18.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v36f16_to_v36i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s0, 16
+; GFX11-FAKE16-NEXT: s_mov_b32 s46, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX11-FAKE16-NEXT: .LBB59_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s27, s27, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s26, s26, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s25, s25, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s24, s24, s40
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s23, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s22, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s21, s13
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s20, s12
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s19, s11
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s18, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s17, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, s29 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, s28 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s26 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s25 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, s24 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v13
+; GFX11-FAKE16-NEXT: s_branch .LBB59_5
+; GFX11-FAKE16-NEXT: .LBB59_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB59_2
+; GFX11-FAKE16-NEXT: .LBB59_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v13, s29 :: v_dual_mov_b32 v14, s28
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s26
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s25 :: v_dual_mov_b32 v8, s24
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v9, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, s21 :: v_dual_mov_b32 v12, s20
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s19 :: v_dual_mov_b32 v4, s18
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s16
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, s3 :: v_dual_mov_b32 v0, s2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s45 :: v_dual_mov_b32 v19, s44
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s43 :: v_dual_mov_b32 v21, s42
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s41 :: v_dual_mov_b32 v23, s40
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s15 :: v_dual_mov_b32 v25, s14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s13 :: v_dual_mov_b32 v27, s12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s11 :: v_dual_mov_b32 v29, s10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s9 :: v_dual_mov_b32 v31, s7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s6 :: v_dual_mov_b32 v33, s8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s4 :: v_dual_mov_b32 v35, s5
+; GFX11-FAKE16-NEXT: .LBB59_5: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v35, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v33, 16, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v31, 16, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v29, 16, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v28, 16, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v29, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v26, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v24, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v23, 16, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v21, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v19, 16, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v18, 16, v24
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll
index 47cb6bd..44cfd6c 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll
@@ -4913,93 +4913,270 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; GFX11-TRUE16-LABEL: bitcast_v40i16_to_v20i32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v0.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s19, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s20, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s21, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:172
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:44
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v1 :: v_dual_mov_b32 v186, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB15_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s7 :: v_dual_mov_b32 v5, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s9 :: v_dual_mov_b32 v7, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s11 :: v_dual_mov_b32 v9, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s13 :: v_dual_mov_b32 v11, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-TRUE16-NEXT: .LBB15_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB15_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v17, v170
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v186 :: v_dual_mov_b32 v19, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:296
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB15_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB15_2
;
; GFX11-FAKE16-LABEL: bitcast_v40i16_to_v20i32_scalar:
@@ -8342,93 +8519,270 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v40f16_to_v20i32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v0.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s19, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s20, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s21, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:172
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:44
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v1 :: v_dual_mov_b32 v186, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB19_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s7 :: v_dual_mov_b32 v5, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s9 :: v_dual_mov_b32 v7, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s11 :: v_dual_mov_b32 v9, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s13 :: v_dual_mov_b32 v11, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-TRUE16-NEXT: .LBB19_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB19_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v17, v170
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v186 :: v_dual_mov_b32 v19, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:296
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB19_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB19_2
;
; GFX11-FAKE16-LABEL: bitcast_v40f16_to_v20i32_scalar:
@@ -11100,142 +11454,271 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr26
; GFX9-NEXT: s_branch .LBB29_2
;
-; GFX11-LABEL: bitcast_v20f32_to_v40i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-NEXT: v_dual_mov_b32 v20, s0 :: v_dual_mov_b32 v19, s1
-; GFX11-NEXT: v_dual_mov_b32 v18, s2 :: v_dual_mov_b32 v3, s16
-; GFX11-NEXT: v_dual_mov_b32 v4, s3 :: v_dual_mov_b32 v9, s17
-; GFX11-NEXT: v_dual_mov_b32 v8, s18 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s20 :: v_dual_mov_b32 v5, s21
-; GFX11-NEXT: v_dual_mov_b32 v14, s22 :: v_dual_mov_b32 v13, s23
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v11, s25
-; GFX11-NEXT: v_dual_mov_b32 v10, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
-; GFX11-NEXT: .LBB29_2: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: .LBB29_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_lshl_or_b32 v21, v21, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v2, v2, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v6
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v5, v36, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v6, v35, 16, v8
-; GFX11-NEXT: v_lshl_or_b32 v8, v33, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v9, v32, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v10
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v48, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_lshl_or_b32 v10, v31, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v11, v30, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v13, v28, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v14, v27, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v19, v22, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v21
-; GFX11-NEXT: v_lshl_or_b32 v20, v39, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v3, v38, 16, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v37, 16, v48
-; GFX11-NEXT: v_lshl_or_b32 v7, v34, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v12, v29, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v15, v26, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v16, v25, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v24, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v23, 16, v0
-; GFX11-NEXT: v_mov_b32_e32 v0, v20
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB29_4:
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr21
-; GFX11-NEXT: ; implicit-def: $vgpr2
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: s_branch .LBB29_2
+; GFX11-TRUE16-LABEL: bitcast_v20f32_to_v40i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-TRUE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-TRUE16-NEXT: .LBB29_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v21 :: v_dual_mov_b32 v20, v20
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v20.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB29_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20
+; GFX11-TRUE16-NEXT: s_branch .LBB29_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v20f32_to_v40i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s0 :: v_dual_mov_b32 v19, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s2 :: v_dual_mov_b32 v3, s16
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s3 :: v_dual_mov_b32 v9, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s20 :: v_dual_mov_b32 v5, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s22 :: v_dual_mov_b32 v13, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v11, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-FAKE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: .LBB29_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v21, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v2, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v36, 16, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v32, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v31, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v28, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v27, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v22, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v39, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v38, 16, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v37, 16, v48
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v29, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v26, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v25, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v24, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v23, 16, v0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v20
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB29_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr2
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: s_branch .LBB29_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -12629,93 +13112,270 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v40i16_to_v20f32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v0.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s19, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s20, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s21, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:172
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:44
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v1 :: v_dual_mov_b32 v186, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB31_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s7 :: v_dual_mov_b32 v5, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s9 :: v_dual_mov_b32 v7, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s11 :: v_dual_mov_b32 v9, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s13 :: v_dual_mov_b32 v11, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB31_3
; GFX11-TRUE16-NEXT: .LBB31_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB31_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v17, v170
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v186 :: v_dual_mov_b32 v19, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:296
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB31_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB31_2
;
; GFX11-FAKE16-LABEL: bitcast_v40i16_to_v20f32_scalar:
@@ -14269,142 +14929,271 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr26
; GFX9-NEXT: s_branch .LBB33_2
;
-; GFX11-LABEL: bitcast_v20f32_to_v40f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-NEXT: v_dual_mov_b32 v20, s0 :: v_dual_mov_b32 v19, s1
-; GFX11-NEXT: v_dual_mov_b32 v18, s2 :: v_dual_mov_b32 v3, s16
-; GFX11-NEXT: v_dual_mov_b32 v4, s3 :: v_dual_mov_b32 v9, s17
-; GFX11-NEXT: v_dual_mov_b32 v8, s18 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s20 :: v_dual_mov_b32 v5, s21
-; GFX11-NEXT: v_dual_mov_b32 v14, s22 :: v_dual_mov_b32 v13, s23
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v11, s25
-; GFX11-NEXT: v_dual_mov_b32 v10, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
-; GFX11-NEXT: .LBB33_2: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: .LBB33_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_lshl_or_b32 v21, v21, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v2, v2, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v6
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v5, v36, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v6, v35, 16, v8
-; GFX11-NEXT: v_lshl_or_b32 v8, v33, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v9, v32, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v10
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v48, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_lshl_or_b32 v10, v31, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v11, v30, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v13, v28, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v14, v27, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v19, v22, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v21
-; GFX11-NEXT: v_lshl_or_b32 v20, v39, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v3, v38, 16, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v37, 16, v48
-; GFX11-NEXT: v_lshl_or_b32 v7, v34, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v12, v29, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v15, v26, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v16, v25, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v24, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v23, 16, v0
-; GFX11-NEXT: v_mov_b32_e32 v0, v20
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB33_4:
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr21
-; GFX11-NEXT: ; implicit-def: $vgpr2
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: s_branch .LBB33_2
+; GFX11-TRUE16-LABEL: bitcast_v20f32_to_v40f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-TRUE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-TRUE16-NEXT: .LBB33_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v21 :: v_dual_mov_b32 v20, v20
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v20.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB33_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20
+; GFX11-TRUE16-NEXT: s_branch .LBB33_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v20f32_to_v40f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s0 :: v_dual_mov_b32 v19, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s2 :: v_dual_mov_b32 v3, s16
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s3 :: v_dual_mov_b32 v9, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s20 :: v_dual_mov_b32 v5, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s22 :: v_dual_mov_b32 v13, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v11, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-FAKE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: .LBB33_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v21, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v2, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v36, 16, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v32, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v31, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v28, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v27, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v22, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v39, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v38, 16, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v37, 16, v48
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v29, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v26, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v25, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v24, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v23, 16, v0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v20
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB33_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr2
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: s_branch .LBB33_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -16043,93 +16832,270 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v40f16_to_v20f32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v0.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s19, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s20, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s21, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:172
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:44
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v1 :: v_dual_mov_b32 v186, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB35_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s7 :: v_dual_mov_b32 v5, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s9 :: v_dual_mov_b32 v7, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s11 :: v_dual_mov_b32 v9, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s13 :: v_dual_mov_b32 v11, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-TRUE16-NEXT: .LBB35_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB35_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v17, v170
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v186 :: v_dual_mov_b32 v19, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:296
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB35_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB35_2
;
; GFX11-FAKE16-LABEL: bitcast_v40f16_to_v20f32_scalar:
@@ -19655,93 +20621,270 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; GFX11-TRUE16-LABEL: bitcast_v40i16_to_v10i64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v0.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s19, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s20, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s21, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:172
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:44
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v1 :: v_dual_mov_b32 v186, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB43_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s7 :: v_dual_mov_b32 v5, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s9 :: v_dual_mov_b32 v7, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s11 :: v_dual_mov_b32 v9, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s13 :: v_dual_mov_b32 v11, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-TRUE16-NEXT: .LBB43_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB43_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v17, v170
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v186 :: v_dual_mov_b32 v19, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:296
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB43_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB43_2
;
; GFX11-FAKE16-LABEL: bitcast_v40i16_to_v10i64_scalar:
@@ -23094,93 +24237,270 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v40f16_to_v10i64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v0.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s19, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s20, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s21, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:172
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:44
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v1 :: v_dual_mov_b32 v186, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s7 :: v_dual_mov_b32 v5, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s9 :: v_dual_mov_b32 v7, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s11 :: v_dual_mov_b32 v9, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s13 :: v_dual_mov_b32 v11, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB47_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v17, v170
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v186 :: v_dual_mov_b32 v19, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:296
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB47_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB47_2
;
; GFX11-FAKE16-LABEL: bitcast_v40f16_to_v10i64_scalar:
@@ -24382,142 +25702,271 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr26
; GFX9-NEXT: s_branch .LBB49_2
;
-; GFX11-LABEL: bitcast_v10f64_to_v40i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-NEXT: v_dual_mov_b32 v20, s0 :: v_dual_mov_b32 v21, s1
-; GFX11-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v4, s3
-; GFX11-NEXT: v_dual_mov_b32 v18, s16 :: v_dual_mov_b32 v19, s17
-; GFX11-NEXT: v_dual_mov_b32 v7, s18 :: v_dual_mov_b32 v8, s19
-; GFX11-NEXT: v_dual_mov_b32 v5, s20 :: v_dual_mov_b32 v6, s21
-; GFX11-NEXT: v_dual_mov_b32 v12, s22 :: v_dual_mov_b32 v13, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s24 :: v_dual_mov_b32 v11, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
-; GFX11-NEXT: .LBB49_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
-; GFX11-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
-; GFX11-NEXT: v_add_f64 v[5:6], v[5:6], 1.0
-; GFX11-NEXT: v_add_f64 v[7:8], v[7:8], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[3:4], v[3:4], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: .LBB49_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v2, v2, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v3, v37, 16, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v36, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v21, v38, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v6, v34, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v7, v33, 16, v8
-; GFX11-NEXT: v_lshl_or_b32 v8, v32, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v5, v35, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v10
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_lshl_or_b32 v11, v30, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v13, v28, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v19, v22, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v21
-; GFX11-NEXT: v_lshl_or_b32 v20, v39, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v9, v9, 16, v36
-; GFX11-NEXT: v_lshl_or_b32 v10, v31, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v12, v29, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v14, v27, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v15, v26, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v16, v25, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v17, v24, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v18, v23, 16, v0
-; GFX11-NEXT: v_mov_b32_e32 v0, v20
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB49_4:
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr2
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr9
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: s_branch .LBB49_2
+; GFX11-TRUE16-LABEL: bitcast_v10f64_to_v40i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-TRUE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-TRUE16-NEXT: .LBB49_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v21 :: v_dual_mov_b32 v20, v20
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v20.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB49_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20
+; GFX11-TRUE16-NEXT: s_branch .LBB49_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v10f64_to_v40i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s0 :: v_dual_mov_b32 v21, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v4, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s16 :: v_dual_mov_b32 v19, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, s18 :: v_dual_mov_b32 v8, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s20 :: v_dual_mov_b32 v6, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s22 :: v_dual_mov_b32 v13, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s24 :: v_dual_mov_b32 v11, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-FAKE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[5:6], v[5:6], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[7:8], v[7:8], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[3:4], v[3:4], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: .LBB49_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v36, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v38, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v33, 16, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v32, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v35, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v28, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v22, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v39, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v9, 16, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v29, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v27, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v26, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v25, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v24, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v23, 16, v0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v20
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB49_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr2
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr9
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: s_branch .LBB49_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -25911,93 +27360,270 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v40i16_to_v10f64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v0.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s19, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s20, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s21, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:172
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:44
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v1 :: v_dual_mov_b32 v186, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s7 :: v_dual_mov_b32 v5, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s9 :: v_dual_mov_b32 v7, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s11 :: v_dual_mov_b32 v9, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s13 :: v_dual_mov_b32 v11, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB51_3
; GFX11-TRUE16-NEXT: .LBB51_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB51_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v17, v170
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v186 :: v_dual_mov_b32 v19, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:296
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB51_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB51_2
;
; GFX11-FAKE16-LABEL: bitcast_v40i16_to_v10f64_scalar:
@@ -27484,142 +29110,271 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; GFX9-NEXT: ; implicit-def: $vgpr26
; GFX9-NEXT: s_branch .LBB53_2
;
-; GFX11-LABEL: bitcast_v10f64_to_v40f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-NEXT: v_dual_mov_b32 v20, s0 :: v_dual_mov_b32 v21, s1
-; GFX11-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v4, s3
-; GFX11-NEXT: v_dual_mov_b32 v18, s16 :: v_dual_mov_b32 v19, s17
-; GFX11-NEXT: v_dual_mov_b32 v7, s18 :: v_dual_mov_b32 v8, s19
-; GFX11-NEXT: v_dual_mov_b32 v5, s20 :: v_dual_mov_b32 v6, s21
-; GFX11-NEXT: v_dual_mov_b32 v12, s22 :: v_dual_mov_b32 v13, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s24 :: v_dual_mov_b32 v11, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
-; GFX11-NEXT: .LBB53_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
-; GFX11-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
-; GFX11-NEXT: v_add_f64 v[5:6], v[5:6], 1.0
-; GFX11-NEXT: v_add_f64 v[7:8], v[7:8], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[3:4], v[3:4], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: .LBB53_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v2, v2, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v3, v37, 16, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v36, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v21, v38, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v6, v34, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v7, v33, 16, v8
-; GFX11-NEXT: v_lshl_or_b32 v8, v32, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v5, v35, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v10
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_lshl_or_b32 v11, v30, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v13, v28, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v19, v22, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v21
-; GFX11-NEXT: v_lshl_or_b32 v20, v39, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v9, v9, 16, v36
-; GFX11-NEXT: v_lshl_or_b32 v10, v31, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v12, v29, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v14, v27, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v15, v26, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v16, v25, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v17, v24, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v18, v23, 16, v0
-; GFX11-NEXT: v_mov_b32_e32 v0, v20
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_4:
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr2
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr9
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: s_branch .LBB53_2
+; GFX11-TRUE16-LABEL: bitcast_v10f64_to_v40f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-TRUE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-TRUE16-NEXT: .LBB53_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v21 :: v_dual_mov_b32 v20, v20
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v20.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB53_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20
+; GFX11-TRUE16-NEXT: s_branch .LBB53_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v10f64_to_v40f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s0 :: v_dual_mov_b32 v21, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v4, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s16 :: v_dual_mov_b32 v19, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, s18 :: v_dual_mov_b32 v8, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s20 :: v_dual_mov_b32 v6, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s22 :: v_dual_mov_b32 v13, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s24 :: v_dual_mov_b32 v11, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-FAKE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[5:6], v[5:6], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[7:8], v[7:8], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[3:4], v[3:4], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: .LBB53_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v36, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v38, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v33, 16, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v32, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v35, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v28, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v22, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v39, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v9, 16, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v29, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v27, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v26, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v25, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v24, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v23, 16, v0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v20
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB53_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr2
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr9
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: s_branch .LBB53_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -29258,93 +31013,270 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; GFX11-TRUE16-LABEL: bitcast_v40f16_to_v10f64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v0.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s19, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s20, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s21, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:172
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:44
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v1 :: v_dual_mov_b32 v186, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB55_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s7 :: v_dual_mov_b32 v5, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s9 :: v_dual_mov_b32 v7, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s11 :: v_dual_mov_b32 v9, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s13 :: v_dual_mov_b32 v11, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-TRUE16-NEXT: .LBB55_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB55_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v17, v170
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v186 :: v_dual_mov_b32 v19, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:296
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB55_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB55_2
;
; GFX11-FAKE16-LABEL: bitcast_v40f16_to_v10f64_scalar:
@@ -31057,12 +32989,10 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v40i16_to_v40f16_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v0.h
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, 0
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s29, 16
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v19.h
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s26, 16
@@ -31083,17 +33013,16 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v19.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v18.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v20.h
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_4
; GFX11-TRUE16-NEXT: .LBB57_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s43
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v18, 16, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s26, s26, s42
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s25, s25, s41
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s24, s24, s40
@@ -31109,59 +33038,61 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s9
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s29, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s29, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s28, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s27, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s26, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s24, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s12, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v21
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
; GFX11-TRUE16-NEXT: s_branch .LBB57_5
; GFX11-TRUE16-NEXT: .LBB57_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20
; GFX11-TRUE16-NEXT: s_branch .LBB57_2
; GFX11-TRUE16-NEXT: .LBB57_4:
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s29 :: v_dual_mov_b32 v16, s28
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s27 :: v_dual_mov_b32 v10, s26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s25 :: v_dual_mov_b32 v12, s24
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s23 :: v_dual_mov_b32 v14, s22
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s21 :: v_dual_mov_b32 v6, s20
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s18
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s17 :: v_dual_mov_b32 v2, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s1 :: v_dual_mov_b32 v21, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, s45 :: v_dual_mov_b32 v23, s44
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, s43 :: v_dual_mov_b32 v25, s42
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s41 :: v_dual_mov_b32 v27, s40
@@ -31172,47 +33103,37 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v36, s6 :: v_dual_mov_b32 v37, s5
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s4 :: v_dual_mov_b32 v39, s9
; GFX11-TRUE16-NEXT: .LBB57_5: ; %end
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v37, 16, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v20
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v36, 16, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v33, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v31, 16, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v31, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v21
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v48
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v34, 16, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v30, 16, v36
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v30, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v29, 16, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v25, 16, v31
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v25, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v32, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v28, 16, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v27, 16, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v26, 16, v30
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v24, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v23, 16, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v22, 16, v25
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v20 :: v_dual_mov_b32 v1, v21
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v21 :: v_dual_mov_b32 v20, v20
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v20.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: bitcast_v40i16_to_v40f16_scalar:
@@ -32879,12 +34800,10 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v40f16_to_v40i16_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v0.h
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, 0
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s29, 16
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v19.h
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s26, 16
@@ -32905,17 +34824,16 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v19.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v18.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v20.h
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_4
; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s43
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v18, 16, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s26, s26, s42
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s25, s25, s41
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s24, s24, s40
@@ -32931,59 +34849,61 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s9
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s29 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s29 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s28 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s27 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s26 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s24 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s12 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v21
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
; GFX11-TRUE16-NEXT: s_branch .LBB59_5
; GFX11-TRUE16-NEXT: .LBB59_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20
; GFX11-TRUE16-NEXT: s_branch .LBB59_2
; GFX11-TRUE16-NEXT: .LBB59_4:
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s29 :: v_dual_mov_b32 v16, s28
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s27 :: v_dual_mov_b32 v10, s26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s25 :: v_dual_mov_b32 v12, s24
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s23 :: v_dual_mov_b32 v14, s22
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s21 :: v_dual_mov_b32 v6, s20
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s18
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s17 :: v_dual_mov_b32 v2, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s1 :: v_dual_mov_b32 v21, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, s45 :: v_dual_mov_b32 v23, s44
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, s43 :: v_dual_mov_b32 v25, s42
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s41 :: v_dual_mov_b32 v27, s40
@@ -32994,47 +34914,37 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v36, s6 :: v_dual_mov_b32 v37, s5
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s4 :: v_dual_mov_b32 v39, s9
; GFX11-TRUE16-NEXT: .LBB59_5: ; %end
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v37, 16, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v20
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v36, 16, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v33, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v31, 16, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v31, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v21
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v48
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v34, 16, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v30, 16, v36
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v30, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v29, 16, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v25, 16, v31
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v25, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v32, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v28, 16, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v27, 16, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v26, 16, v30
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v24, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v23, 16, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v22, 16, v25
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v20 :: v_dual_mov_b32 v1, v21
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v21 :: v_dual_mov_b32 v20, v20
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v20.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: bitcast_v40f16_to_v40i16_scalar:
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll
index 11f90b9..14e17ce 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll
@@ -2411,66 +2411,123 @@ define inreg i64 @bitcast_v4bf16_to_i64_scalar(<4 x bfloat> inreg %a, i32 inreg
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v4bf16_to_i64_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
-; GFX11-NEXT: .LBB23_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s2, 0, s0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s1, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s1, 0, s1
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, 0x7fff, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v4, v8, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v0, v0, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v1, v3, 16, v2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: s_branch .LBB23_2
-; GFX11-NEXT: .LBB23_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v4bf16_to_i64_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-TRUE16-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v5 :: v_dual_add_nc_u32 v9, v9, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v5, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v6, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v9.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v3.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB23_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB23_2
+; GFX11-TRUE16-NEXT: .LBB23_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v4bf16_to_i64_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-FAKE16-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s1, 0, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v0, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB23_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB23_2
+; GFX11-FAKE16-NEXT: .LBB23_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5542,66 +5599,123 @@ define inreg double @bitcast_v4bf16_to_f64_scalar(<4 x bfloat> inreg %a, i32 inr
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v4bf16_to_f64_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
-; GFX11-NEXT: .LBB47_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s2, 0, s0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s1, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s1, 0, s1
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, 0x7fff, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v4, v8, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v0, v0, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v1, v3, 16, v2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
-; GFX11-NEXT: .LBB47_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v4bf16_to_f64_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v5 :: v_dual_add_nc_u32 v9, v9, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v5, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v6, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v9.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v3.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB47_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB47_2
+; GFX11-TRUE16-NEXT: .LBB47_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v4bf16_to_f64_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-FAKE16-NEXT: .LBB47_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s1, 0, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v0, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB47_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB47_2
+; GFX11-FAKE16-NEXT: .LBB47_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8386,66 +8500,123 @@ define inreg <2 x i32> @bitcast_v4bf16_to_v2i32_scalar(<4 x bfloat> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v4bf16_to_v2i32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB67_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB67_4
-; GFX11-NEXT: .LBB67_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s2, 0, s0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s1, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s1, 0, s1
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, 0x7fff, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v4, v8, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v0, v0, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v1, v3, 16, v2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB67_3:
-; GFX11-NEXT: s_branch .LBB67_2
-; GFX11-NEXT: .LBB67_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v4bf16_to_v2i32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX11-TRUE16-NEXT: .LBB67_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v5 :: v_dual_add_nc_u32 v9, v9, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v5, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v6, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v9.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v3.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB67_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB67_2
+; GFX11-TRUE16-NEXT: .LBB67_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v4bf16_to_v2i32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX11-FAKE16-NEXT: .LBB67_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s1, 0, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v0, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB67_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB67_2
+; GFX11-FAKE16-NEXT: .LBB67_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10905,66 +11076,123 @@ define inreg <2 x float> @bitcast_v4bf16_to_v2f32_scalar(<4 x bfloat> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v4bf16_to_v2f32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB83_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB83_4
-; GFX11-NEXT: .LBB83_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s2, 0, s0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s1, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s1, 0, s1
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, 0x7fff, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v4, v8, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v0, v0, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v1, v3, 16, v2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB83_3:
-; GFX11-NEXT: s_branch .LBB83_2
-; GFX11-NEXT: .LBB83_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v4bf16_to_v2f32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX11-TRUE16-NEXT: .LBB83_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v5 :: v_dual_add_nc_u32 v9, v9, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v5, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v6, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v9.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v3.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB83_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB83_2
+; GFX11-TRUE16-NEXT: .LBB83_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v4bf16_to_v2f32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX11-FAKE16-NEXT: .LBB83_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s1, 0, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v0, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB83_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB83_2
+; GFX11-FAKE16-NEXT: .LBB83_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -12814,47 +13042,40 @@ define <4 x i16> @bitcast_v4bf16_to_v4i16(<4 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, 0
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v0.l
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v2
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v9, v9, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v3
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v1, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v1, 0x7fff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v2, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
-; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v2, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v1, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v3, v3, v5 :: v_dual_add_f32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v0, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v9, v11, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v6, v7, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v8, v9, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v10, v11, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v0, 16, v1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v2, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.h
; GFX11-TRUE16-NEXT: .LBB94_2: ; %end
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -13071,60 +13292,112 @@ define inreg <4 x i16> @bitcast_v4bf16_to_v4i16_scalar(<4 x bfloat> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v4bf16_to_v4i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB95_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB95_4
-; GFX11-NEXT: .LBB95_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s2, 0, s1
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s0
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v0
-; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, 0x7fff, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_and_or_b32 v1, 0xffff0000, v3, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB95_3:
-; GFX11-NEXT: s_branch .LBB95_2
-; GFX11-NEXT: .LBB95_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v4bf16_to_v4i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX11-TRUE16-NEXT: .LBB95_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s0, 0, s0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v4.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v8, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.h
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB95_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB95_2
+; GFX11-TRUE16-NEXT: .LBB95_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v4bf16_to_v4i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX11-FAKE16-NEXT: .LBB95_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s2, 0, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v3, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB95_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB95_2
+; GFX11-FAKE16-NEXT: .LBB95_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -14889,65 +15162,124 @@ define inreg <4 x half> @bitcast_v4bf16_to_v4f16_scalar(<4 x bfloat> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v4bf16_to_v4f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB103_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB103_4
-; GFX11-NEXT: .LBB103_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s2, 0, s1
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s0
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, 0x7fff, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_dual_cndmask_b32 v3, v4, v8 :: v_dual_and_b32 v2, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v1, v4, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v0, v0, 16, v2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB103_3:
-; GFX11-NEXT: s_branch .LBB103_2
-; GFX11-NEXT: .LBB103_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v4bf16_to_v4f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX11-TRUE16-NEXT: .LBB103_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v3, v7 :: v_dual_add_nc_u32 v5, v5, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v5, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v4.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB103_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB103_2
+; GFX11-TRUE16-NEXT: .LBB103_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v4bf16_to_v4f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX11-FAKE16-NEXT: .LBB103_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s2, 0, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v3, v4, v8 :: v_dual_and_b32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v4, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v0, 16, v2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB103_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB103_2
+; GFX11-FAKE16-NEXT: .LBB103_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -16614,88 +16946,172 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v4, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v4bf16_to_v8i8_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB109_3
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24
-; GFX11-NEXT: s_lshr_b32 s6, s1, 24
-; GFX11-NEXT: s_lshr_b32 s8, s1, 16
-; GFX11-NEXT: s_lshr_b32 s7, s1, 8
-; GFX11-NEXT: s_lshr_b32 s5, s0, 16
-; GFX11-NEXT: s_lshr_b32 s3, s0, 8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB109_4
-; GFX11-NEXT: .LBB109_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s2, 0, s0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s1, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s1, 0, s1
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v0
-; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, v3, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v8
-; GFX11-NEXT: v_lshl_or_b32 v9, v2, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v10, v6, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b64 v[3:4], 24, v[9:10]
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 24, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v10
-; GFX11-NEXT: v_mov_b32_e32 v4, v8
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB109_3:
-; GFX11-NEXT: ; implicit-def: $sgpr3
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: ; implicit-def: $sgpr2
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: s_branch .LBB109_2
-; GFX11-NEXT: .LBB109_4:
-; GFX11-NEXT: v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, s6
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v5, s7
-; GFX11-NEXT: v_dual_mov_b32 v2, s5 :: v_dual_mov_b32 v1, s3
-; GFX11-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v4, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v4bf16_to_v8i8_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB109_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[2:3], s[0:1], 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s1, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s1, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s0, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s3, s0, 8
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB109_4
+; GFX11-TRUE16-NEXT: .LBB109_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v3, v3, v7 :: v_dual_add_nc_u32 v6, v6, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v5, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v0, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v1.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v8.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 8, v9
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v6.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[3:4], 24, v[9:10]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 24, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 8, v10
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v8
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB109_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr3
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr5
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr2
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr7
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-TRUE16-NEXT: s_branch .LBB109_2
+; GFX11-TRUE16-NEXT: .LBB109_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, s6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v5, s7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s5 :: v_dual_mov_b32 v1, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v4, s1
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v4bf16_to_v8i8_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB109_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[2:3], s[0:1], 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s1, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s1, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s0, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s3, s0, 8
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB109_4
+; GFX11-FAKE16-NEXT: .LBB109_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s1, 0, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, v3, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v2, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v6, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[3:4], 24, v[9:10]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 24, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 8, v10
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v8
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB109_3:
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr3
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr5
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr2
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr7
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-FAKE16-NEXT: s_branch .LBB109_2
+; GFX11-FAKE16-NEXT: .LBB109_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v5, s7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s5 :: v_dual_mov_b32 v1, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v4, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll
index 2cc7c44..87d5157 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll
@@ -5328,105 +5328,278 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX11-TRUE16-LABEL: bitcast_v44i16_to_v22i32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:180
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:52
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v3 :: v_dual_mov_b32 v186, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v1 :: v_dual_mov_b32 v188, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB15_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-TRUE16-NEXT: .LBB15_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB15_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v187 :: v_dual_mov_b32 v20, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v21, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:304
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB15_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB15_2
;
; GFX11-FAKE16-LABEL: bitcast_v44i16_to_v22i32_scalar:
@@ -9137,105 +9310,278 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v44f16_to_v22i32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:180
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:52
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v3 :: v_dual_mov_b32 v186, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v1 :: v_dual_mov_b32 v188, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB19_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-TRUE16-NEXT: .LBB19_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB19_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v187 :: v_dual_mov_b32 v20, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v21, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:304
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB19_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB19_2
;
; GFX11-FAKE16-LABEL: bitcast_v44f16_to_v22i32_scalar:
@@ -12099,155 +12445,295 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr30
; GFX9-NEXT: s_branch .LBB29_2
;
-; GFX11-LABEL: bitcast_v22f32_to_v44i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-NEXT: v_dual_mov_b32 v22, s0 :: v_dual_mov_b32 v21, s1
-; GFX11-NEXT: v_dual_mov_b32 v20, s2 :: v_dual_mov_b32 v19, s3
-; GFX11-NEXT: v_dual_mov_b32 v18, s16 :: v_dual_mov_b32 v5, s18
-; GFX11-NEXT: v_dual_mov_b32 v6, s17 :: v_dual_mov_b32 v11, s19
-; GFX11-NEXT: v_dual_mov_b32 v10, s20 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s22 :: v_dual_mov_b32 v7, s23
-; GFX11-NEXT: v_dual_mov_b32 v15, s24 :: v_dual_mov_b32 v14, s25
-; GFX11-NEXT: v_dual_mov_b32 v13, s26 :: v_dual_mov_b32 v12, s27
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v22
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
-; GFX11-NEXT: .LBB29_2: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
-; GFX11-NEXT: v_dual_add_f32 v12, 1.0, v12 :: v_dual_add_f32 v13, 1.0, v13
-; GFX11-NEXT: v_dual_add_f32 v14, 1.0, v14 :: v_dual_add_f32 v15, 1.0, v15
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v22
-; GFX11-NEXT: .LBB29_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v23, v23, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v25, v25, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v4, v4, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v8
-; GFX11-NEXT: v_lshl_or_b32 v7, v48, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v11, v36, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v5, v50, 16, v6
-; GFX11-NEXT: v_lshl_or_b32 v6, v49, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_lshl_or_b32 v8, v39, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v10, v37, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_lshl_or_b32 v12, v35, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v15, v32, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v19, v28, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v21, v26, 16, v3
-; GFX11-NEXT: v_mov_b32_e32 v1, v25
-; GFX11-NEXT: v_lshl_or_b32 v24, v24, 16, v22
-; GFX11-NEXT: v_mov_b32_e32 v3, v23
-; GFX11-NEXT: v_lshl_or_b32 v22, v51, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v9, v38, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v13, v34, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v14, v33, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v16, v31, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v17, v30, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v18, v29, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v20, v27, 16, v2
-; GFX11-NEXT: v_mov_b32_e32 v0, v24
-; GFX11-NEXT: v_mov_b32_e32 v2, v22
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB29_4:
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr4
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: s_branch .LBB29_2
+; GFX11-TRUE16-LABEL: bitcast_v22f32_to_v44i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-TRUE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-TRUE16-NEXT: .LBB29_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v22.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB29_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-TRUE16-NEXT: s_branch .LBB29_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v22f32_to_v44i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s0 :: v_dual_mov_b32 v21, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s2 :: v_dual_mov_b32 v19, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s16 :: v_dual_mov_b32 v5, s18
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s17 :: v_dual_mov_b32 v11, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s22 :: v_dual_mov_b32 v7, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s24 :: v_dual_mov_b32 v14, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v13, s26 :: v_dual_mov_b32 v12, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v22
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-FAKE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v12, 1.0, v12 :: v_dual_add_f32 v13, 1.0, v13
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v14, 1.0, v14 :: v_dual_add_f32 v15, 1.0, v15
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v22
+; GFX11-FAKE16-NEXT: .LBB29_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v23, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v25, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v4, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v36, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v50, 16, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v49, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v39, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v37, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v35, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v32, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v26, 16, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v25
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v24, 16, v22
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v51, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v34, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v29, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v27, 16, v2
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v24
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v22
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB29_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: s_branch .LBB29_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -13805,105 +14291,278 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v44i16_to_v22f32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:180
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:52
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v3 :: v_dual_mov_b32 v186, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v1 :: v_dual_mov_b32 v188, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB31_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB31_3
; GFX11-TRUE16-NEXT: .LBB31_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB31_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v187 :: v_dual_mov_b32 v20, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v21, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:304
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB31_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB31_2
;
; GFX11-FAKE16-LABEL: bitcast_v44i16_to_v22f32_scalar:
@@ -15630,155 +16289,295 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr30
; GFX9-NEXT: s_branch .LBB33_2
;
-; GFX11-LABEL: bitcast_v22f32_to_v44f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-NEXT: v_dual_mov_b32 v22, s0 :: v_dual_mov_b32 v21, s1
-; GFX11-NEXT: v_dual_mov_b32 v20, s2 :: v_dual_mov_b32 v19, s3
-; GFX11-NEXT: v_dual_mov_b32 v18, s16 :: v_dual_mov_b32 v5, s18
-; GFX11-NEXT: v_dual_mov_b32 v6, s17 :: v_dual_mov_b32 v11, s19
-; GFX11-NEXT: v_dual_mov_b32 v10, s20 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s22 :: v_dual_mov_b32 v7, s23
-; GFX11-NEXT: v_dual_mov_b32 v15, s24 :: v_dual_mov_b32 v14, s25
-; GFX11-NEXT: v_dual_mov_b32 v13, s26 :: v_dual_mov_b32 v12, s27
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v22
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
-; GFX11-NEXT: .LBB33_2: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
-; GFX11-NEXT: v_dual_add_f32 v12, 1.0, v12 :: v_dual_add_f32 v13, 1.0, v13
-; GFX11-NEXT: v_dual_add_f32 v14, 1.0, v14 :: v_dual_add_f32 v15, 1.0, v15
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v22
-; GFX11-NEXT: .LBB33_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v23, v23, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v25, v25, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v4, v4, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v8
-; GFX11-NEXT: v_lshl_or_b32 v7, v48, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v11, v36, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v5, v50, 16, v6
-; GFX11-NEXT: v_lshl_or_b32 v6, v49, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_lshl_or_b32 v8, v39, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v10, v37, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_lshl_or_b32 v12, v35, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v15, v32, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v19, v28, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v21, v26, 16, v3
-; GFX11-NEXT: v_mov_b32_e32 v1, v25
-; GFX11-NEXT: v_lshl_or_b32 v24, v24, 16, v22
-; GFX11-NEXT: v_mov_b32_e32 v3, v23
-; GFX11-NEXT: v_lshl_or_b32 v22, v51, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v9, v38, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v13, v34, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v14, v33, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v16, v31, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v17, v30, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v18, v29, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v20, v27, 16, v2
-; GFX11-NEXT: v_mov_b32_e32 v0, v24
-; GFX11-NEXT: v_mov_b32_e32 v2, v22
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB33_4:
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr4
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: s_branch .LBB33_2
+; GFX11-TRUE16-LABEL: bitcast_v22f32_to_v44f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-TRUE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-TRUE16-NEXT: .LBB33_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v22.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB33_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-TRUE16-NEXT: s_branch .LBB33_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v22f32_to_v44f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s0 :: v_dual_mov_b32 v21, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s2 :: v_dual_mov_b32 v19, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s16 :: v_dual_mov_b32 v5, s18
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s17 :: v_dual_mov_b32 v11, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s22 :: v_dual_mov_b32 v7, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s24 :: v_dual_mov_b32 v14, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v13, s26 :: v_dual_mov_b32 v12, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v22
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-FAKE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v12, 1.0, v12 :: v_dual_add_f32 v13, 1.0, v13
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v14, 1.0, v14 :: v_dual_add_f32 v15, 1.0, v15
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v22
+; GFX11-FAKE16-NEXT: .LBB33_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v23, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v25, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v4, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v36, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v50, 16, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v49, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v39, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v37, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v35, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v32, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v26, 16, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v25
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v24, 16, v22
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v51, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v34, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v29, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v27, 16, v2
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v24
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v22
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB33_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: s_branch .LBB33_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -17607,105 +18406,278 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v44f16_to_v22f32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:180
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:52
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v3 :: v_dual_mov_b32 v186, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v1 :: v_dual_mov_b32 v188, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB35_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-TRUE16-NEXT: .LBB35_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB35_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v187 :: v_dual_mov_b32 v20, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v21, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:304
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB35_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB35_2
;
; GFX11-FAKE16-LABEL: bitcast_v44f16_to_v22f32_scalar:
@@ -21568,105 +22540,278 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX11-TRUE16-LABEL: bitcast_v44i16_to_v11i64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:180
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:52
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v3 :: v_dual_mov_b32 v186, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v1 :: v_dual_mov_b32 v188, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB43_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-TRUE16-NEXT: .LBB43_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB43_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v187 :: v_dual_mov_b32 v20, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v21, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:304
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB43_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB43_2
;
; GFX11-FAKE16-LABEL: bitcast_v44i16_to_v11i64_scalar:
@@ -25389,105 +26534,278 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v44f16_to_v11i64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:180
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:52
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v3 :: v_dual_mov_b32 v186, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v1 :: v_dual_mov_b32 v188, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB47_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v187 :: v_dual_mov_b32 v20, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v21, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:304
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB47_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB47_2
;
; GFX11-FAKE16-LABEL: bitcast_v44f16_to_v11i64_scalar:
@@ -26793,154 +28111,294 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr30
; GFX9-NEXT: s_branch .LBB49_2
;
-; GFX11-LABEL: bitcast_v11f64_to_v44i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-NEXT: v_dual_mov_b32 v22, s0 :: v_dual_mov_b32 v23, s1
-; GFX11-NEXT: v_dual_mov_b32 v20, s2 :: v_dual_mov_b32 v21, s3
-; GFX11-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v6, s17
-; GFX11-NEXT: v_dual_mov_b32 v18, s18 :: v_dual_mov_b32 v19, s19
-; GFX11-NEXT: v_dual_mov_b32 v9, s20 :: v_dual_mov_b32 v10, s21
-; GFX11-NEXT: v_dual_mov_b32 v7, s22 :: v_dual_mov_b32 v8, s23
-; GFX11-NEXT: v_dual_mov_b32 v14, s24 :: v_dual_mov_b32 v15, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s26 :: v_dual_mov_b32 v13, s27
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v22
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
-; GFX11-NEXT: .LBB49_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[7:8], v[7:8], 1.0
-; GFX11-NEXT: v_add_f64 v[9:10], v[9:10], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[5:6], v[5:6], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v22
-; GFX11-NEXT: .LBB49_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_lshl_or_b32 v25, v25, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_lshl_or_b32 v23, v50, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v4, v4, 16, v5
-; GFX11-NEXT: v_lshl_or_b32 v5, v49, 16, v6
-; GFX11-NEXT: v_lshl_or_b32 v6, v48, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v24, v24, 16, v22
-; GFX11-NEXT: v_lshl_or_b32 v7, v39, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v21, v26, 16, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v23
-; GFX11-NEXT: v_lshl_or_b32 v22, v51, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v8
-; GFX11-NEXT: v_lshl_or_b32 v8, v38, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v9, v37, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v10, v36, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v11, v11, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v12, v35, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v13, v34, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v14, v33, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v15, v32, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v16, v31, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v17, v30, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v18, v29, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v19, v28, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v20, v27, 16, v2
-; GFX11-NEXT: v_dual_mov_b32 v0, v24 :: v_dual_mov_b32 v1, v25
-; GFX11-NEXT: v_mov_b32_e32 v2, v22
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB49_4:
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr4
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr11
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: s_branch .LBB49_2
+; GFX11-TRUE16-LABEL: bitcast_v11f64_to_v44i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-TRUE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-TRUE16-NEXT: .LBB49_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v22.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB49_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-TRUE16-NEXT: s_branch .LBB49_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v11f64_to_v44i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s0 :: v_dual_mov_b32 v23, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s2 :: v_dual_mov_b32 v21, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v6, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s18 :: v_dual_mov_b32 v19, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v9, s20 :: v_dual_mov_b32 v10, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, s22 :: v_dual_mov_b32 v8, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s24 :: v_dual_mov_b32 v15, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s26 :: v_dual_mov_b32 v13, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v22
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-FAKE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[7:8], v[7:8], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[9:10], v[9:10], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[5:6], v[5:6], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v22
+; GFX11-FAKE16-NEXT: .LBB49_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v25, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v50, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v4, 16, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v48, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v24, 16, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v39, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v26, 16, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v51, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v36, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v11, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v35, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v34, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v32, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v29, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v27, 16, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v24 :: v_dual_mov_b32 v1, v25
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v22
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB49_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr11
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: s_branch .LBB49_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -28498,105 +29956,278 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v44i16_to_v11f64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:180
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:52
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v3 :: v_dual_mov_b32 v186, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v1 :: v_dual_mov_b32 v188, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB51_3
; GFX11-TRUE16-NEXT: .LBB51_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB51_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v187 :: v_dual_mov_b32 v20, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v21, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:304
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB51_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB51_2
;
; GFX11-FAKE16-LABEL: bitcast_v44i16_to_v11f64_scalar:
@@ -30248,154 +31879,294 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; GFX9-NEXT: ; implicit-def: $vgpr30
; GFX9-NEXT: s_branch .LBB53_2
;
-; GFX11-LABEL: bitcast_v11f64_to_v44f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-NEXT: v_dual_mov_b32 v22, s0 :: v_dual_mov_b32 v23, s1
-; GFX11-NEXT: v_dual_mov_b32 v20, s2 :: v_dual_mov_b32 v21, s3
-; GFX11-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v6, s17
-; GFX11-NEXT: v_dual_mov_b32 v18, s18 :: v_dual_mov_b32 v19, s19
-; GFX11-NEXT: v_dual_mov_b32 v9, s20 :: v_dual_mov_b32 v10, s21
-; GFX11-NEXT: v_dual_mov_b32 v7, s22 :: v_dual_mov_b32 v8, s23
-; GFX11-NEXT: v_dual_mov_b32 v14, s24 :: v_dual_mov_b32 v15, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s26 :: v_dual_mov_b32 v13, s27
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v22
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
-; GFX11-NEXT: .LBB53_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[7:8], v[7:8], 1.0
-; GFX11-NEXT: v_add_f64 v[9:10], v[9:10], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[5:6], v[5:6], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v22
-; GFX11-NEXT: .LBB53_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_lshl_or_b32 v25, v25, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_lshl_or_b32 v23, v50, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v4, v4, 16, v5
-; GFX11-NEXT: v_lshl_or_b32 v5, v49, 16, v6
-; GFX11-NEXT: v_lshl_or_b32 v6, v48, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v24, v24, 16, v22
-; GFX11-NEXT: v_lshl_or_b32 v7, v39, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v21, v26, 16, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v23
-; GFX11-NEXT: v_lshl_or_b32 v22, v51, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v8
-; GFX11-NEXT: v_lshl_or_b32 v8, v38, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v9, v37, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v10, v36, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v11, v11, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v12, v35, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v13, v34, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v14, v33, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v15, v32, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v16, v31, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v17, v30, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v18, v29, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v19, v28, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v20, v27, 16, v2
-; GFX11-NEXT: v_dual_mov_b32 v0, v24 :: v_dual_mov_b32 v1, v25
-; GFX11-NEXT: v_mov_b32_e32 v2, v22
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_4:
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr4
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr11
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: s_branch .LBB53_2
+; GFX11-TRUE16-LABEL: bitcast_v11f64_to_v44f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-TRUE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-TRUE16-NEXT: .LBB53_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v22.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB53_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-TRUE16-NEXT: s_branch .LBB53_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v11f64_to_v44f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s0 :: v_dual_mov_b32 v23, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s2 :: v_dual_mov_b32 v21, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v6, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s18 :: v_dual_mov_b32 v19, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v9, s20 :: v_dual_mov_b32 v10, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, s22 :: v_dual_mov_b32 v8, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s24 :: v_dual_mov_b32 v15, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s26 :: v_dual_mov_b32 v13, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v22
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-FAKE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[7:8], v[7:8], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[9:10], v[9:10], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[5:6], v[5:6], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v22
+; GFX11-FAKE16-NEXT: .LBB53_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v25, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v50, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v4, 16, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v48, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v24, 16, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v39, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v26, 16, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v51, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v36, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v11, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v35, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v34, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v32, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v29, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v27, 16, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v24 :: v_dual_mov_b32 v1, v25
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v22
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB53_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr11
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: s_branch .LBB53_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -32224,105 +33995,278 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; GFX11-TRUE16-LABEL: bitcast_v44f16_to_v11f64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:180
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:52
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v3 :: v_dual_mov_b32 v186, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v1 :: v_dual_mov_b32 v188, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB55_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-TRUE16-NEXT: .LBB55_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB55_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v187 :: v_dual_mov_b32 v20, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v21, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:304
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB55_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB55_2
;
; GFX11-FAKE16-LABEL: bitcast_v44f16_to_v11f64_scalar:
@@ -34283,15 +36227,10 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v44i16_to_v44f16_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v21.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v21.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v21.h
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, 0
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
@@ -34313,19 +36252,18 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v21.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v20.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v22.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v19.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v22.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v18.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v22.h
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_4
; GFX11-TRUE16-NEXT: .LBB57_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v18, 16, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s43
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s26, s26, s42
@@ -34343,63 +36281,67 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s29, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s28, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s28, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s25, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s24, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s14, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s9, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v25
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v23
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v22
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v6
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
; GFX11-TRUE16-NEXT: s_branch .LBB57_5
; GFX11-TRUE16-NEXT: .LBB57_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22
; GFX11-TRUE16-NEXT: s_branch .LBB57_2
; GFX11-TRUE16-NEXT: .LBB57_4:
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v12, s28
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s27 :: v_dual_mov_b32 v14, s26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s25 :: v_dual_mov_b32 v16, s24
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s23 :: v_dual_mov_b32 v8, s22
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s20
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s19 :: v_dual_mov_b32 v4, s18
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, s3 :: v_dual_mov_b32 v23, s2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, s1 :: v_dual_mov_b32 v25, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s45 :: v_dual_mov_b32 v27, s44
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s43 :: v_dual_mov_b32 v29, s42
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s41 :: v_dual_mov_b32 v31, s40
@@ -34410,53 +36352,40 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v48, s5 :: v_dual_mov_b32 v49, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v50, s4 :: v_dual_mov_b32 v51, s12
; GFX11-TRUE16-NEXT: .LBB57_5: ; %end
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v24
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v22
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v51, 16, v25
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v50, 16, v52
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v49, 16, v23
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v48, 16, v53
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v38, 16, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v23
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v39, 16, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v37, 16, v50
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v36, 16, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v35, 16, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v34, 16, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v31, 16, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v30, 16, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v29, 16, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v28, 16, v32
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v27, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v26, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v24 :: v_dual_mov_b32 v1, v25
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v22
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v22.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: bitcast_v44i16_to_v44f16_scalar:
@@ -36279,15 +38208,10 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v44f16_to_v44i16_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v21.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v21.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v21.h
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, 0
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
@@ -36309,19 +38233,18 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v21.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v20.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v22.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v19.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v22.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v18.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v22.h
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_4
; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v18, 16, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s43
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s26, s26, s42
@@ -36339,63 +38262,67 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s29 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s28 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s28 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s25 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s24 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s14 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s9 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v25
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v23
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v22
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v6
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
; GFX11-TRUE16-NEXT: s_branch .LBB59_5
; GFX11-TRUE16-NEXT: .LBB59_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22
; GFX11-TRUE16-NEXT: s_branch .LBB59_2
; GFX11-TRUE16-NEXT: .LBB59_4:
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v12, s28
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s27 :: v_dual_mov_b32 v14, s26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s25 :: v_dual_mov_b32 v16, s24
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s23 :: v_dual_mov_b32 v8, s22
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s20
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s19 :: v_dual_mov_b32 v4, s18
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, s3 :: v_dual_mov_b32 v23, s2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, s1 :: v_dual_mov_b32 v25, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s45 :: v_dual_mov_b32 v27, s44
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s43 :: v_dual_mov_b32 v29, s42
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s41 :: v_dual_mov_b32 v31, s40
@@ -36406,53 +38333,40 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v48, s5 :: v_dual_mov_b32 v49, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v50, s4 :: v_dual_mov_b32 v51, s12
; GFX11-TRUE16-NEXT: .LBB59_5: ; %end
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v24
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v22
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v51, 16, v25
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v50, 16, v52
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v49, 16, v23
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v48, 16, v53
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v38, 16, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v23
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v39, 16, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v37, 16, v50
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v36, 16, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v35, 16, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v34, 16, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v31, 16, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v30, 16, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v29, 16, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v28, 16, v32
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v27, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v26, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v24 :: v_dual_mov_b32 v1, v25
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v22
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v22.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: bitcast_v44f16_to_v44i16_scalar:
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll
index c35e183..fb2e94f 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll
@@ -5805,117 +5805,286 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX11-TRUE16-LABEL: bitcast_v48i16_to_v24i32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:60
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v5 :: v_dual_mov_b32 v186, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v3 :: v_dual_mov_b32 v188, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v1 :: v_dual_mov_b32 v190, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB15_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-TRUE16-NEXT: .LBB15_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB15_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v20, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v190 :: v_dual_mov_b32 v19, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v187 :: v_dual_mov_b32 v22, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v23, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:312
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB15_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB15_2
;
; GFX11-FAKE16-LABEL: bitcast_v48i16_to_v24i32_scalar:
@@ -10044,117 +10213,286 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v48f16_to_v24i32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:60
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v5 :: v_dual_mov_b32 v186, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v3 :: v_dual_mov_b32 v188, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v1 :: v_dual_mov_b32 v190, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB19_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-TRUE16-NEXT: .LBB19_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB19_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v20, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v190 :: v_dual_mov_b32 v19, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v187 :: v_dual_mov_b32 v22, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v23, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:312
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB19_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB19_2
;
; GFX11-FAKE16-LABEL: bitcast_v48f16_to_v24i32_scalar:
@@ -13212,166 +13550,317 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr34
; GFX9-NEXT: s_branch .LBB29_2
;
-; GFX11-LABEL: bitcast_v24f32_to_v48i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-NEXT: v_dual_mov_b32 v24, s0 :: v_dual_mov_b32 v23, s1
-; GFX11-NEXT: v_dual_mov_b32 v22, s2 :: v_dual_mov_b32 v21, s3
-; GFX11-NEXT: v_dual_mov_b32 v20, s16 :: v_dual_mov_b32 v19, s17
-; GFX11-NEXT: v_dual_mov_b32 v18, s18 :: v_dual_mov_b32 v7, s20
-; GFX11-NEXT: v_dual_mov_b32 v8, s19 :: v_dual_mov_b32 v13, s21
-; GFX11-NEXT: v_dual_mov_b32 v12, s22 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s24 :: v_dual_mov_b32 v9, s25
-; GFX11-NEXT: v_dual_mov_b32 v15, s26 :: v_dual_mov_b32 v14, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
-; GFX11-NEXT: .LBB29_2: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v14, 1.0, v14 :: v_dual_add_f32 v15, 1.0, v15
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v24
-; GFX11-NEXT: .LBB29_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_lshl_or_b32 v6, v6, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v10
-; GFX11-NEXT: v_lshl_or_b32 v10, v51, 16, v12
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v25, v25, 16, v23
-; GFX11-NEXT: v_lshl_or_b32 v12, v49, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v27, v27, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v9
-; GFX11-NEXT: v_lshl_or_b32 v14, v39, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v15, v38, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v35, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v26, v26, 16, v22
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_lshl_or_b32 v9, v52, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v13, v48, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v4
-; GFX11-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v28, v28, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v20, v33, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v25
-; GFX11-NEXT: v_lshl_or_b32 v24, v55, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v7, v54, 16, v8
-; GFX11-NEXT: v_lshl_or_b32 v8, v53, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v11, v50, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v16, v37, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v36, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v19, v34, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v21, v32, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v22, v31, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v23, v30, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v24
-; GFX11-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
-; GFX11-NEXT: v_mov_b32_e32 v4, v28
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB29_4:
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr6
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: s_branch .LBB29_2
+; GFX11-TRUE16-LABEL: bitcast_v24f32_to_v48i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-TRUE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-TRUE16-NEXT: .LBB29_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v24.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB29_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: s_branch .LBB29_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v24f32_to_v48i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s0 :: v_dual_mov_b32 v23, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s2 :: v_dual_mov_b32 v21, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s16 :: v_dual_mov_b32 v19, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s18 :: v_dual_mov_b32 v7, s20
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s19 :: v_dual_mov_b32 v13, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s24 :: v_dual_mov_b32 v9, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s26 :: v_dual_mov_b32 v14, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v24
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-FAKE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v14, 1.0, v14 :: v_dual_add_f32 v15, 1.0, v15
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v24
+; GFX11-FAKE16-NEXT: .LBB29_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v6, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v51, 16, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v49, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v27, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v29, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v39, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v38, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v35, 16, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v26, 16, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v52, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v48, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v33, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v25
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v55, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v54, 16, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v53, 16, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v37, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v36, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v34, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v32, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v31, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v30, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v24
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v28
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB29_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr6
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: s_branch .LBB29_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -15153,117 +15642,286 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v48i16_to_v24f32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:60
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v5 :: v_dual_mov_b32 v186, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v3 :: v_dual_mov_b32 v188, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v1 :: v_dual_mov_b32 v190, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB31_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB31_3
; GFX11-TRUE16-NEXT: .LBB31_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB31_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v20, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v190 :: v_dual_mov_b32 v19, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v187 :: v_dual_mov_b32 v22, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v23, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:312
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB31_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB31_2
;
; GFX11-FAKE16-LABEL: bitcast_v48i16_to_v24f32_scalar:
@@ -17167,166 +17825,317 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr34
; GFX9-NEXT: s_branch .LBB33_2
;
-; GFX11-LABEL: bitcast_v24f32_to_v48f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-NEXT: v_dual_mov_b32 v24, s0 :: v_dual_mov_b32 v23, s1
-; GFX11-NEXT: v_dual_mov_b32 v22, s2 :: v_dual_mov_b32 v21, s3
-; GFX11-NEXT: v_dual_mov_b32 v20, s16 :: v_dual_mov_b32 v19, s17
-; GFX11-NEXT: v_dual_mov_b32 v18, s18 :: v_dual_mov_b32 v7, s20
-; GFX11-NEXT: v_dual_mov_b32 v8, s19 :: v_dual_mov_b32 v13, s21
-; GFX11-NEXT: v_dual_mov_b32 v12, s22 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s24 :: v_dual_mov_b32 v9, s25
-; GFX11-NEXT: v_dual_mov_b32 v15, s26 :: v_dual_mov_b32 v14, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
-; GFX11-NEXT: .LBB33_2: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v14, 1.0, v14 :: v_dual_add_f32 v15, 1.0, v15
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v24
-; GFX11-NEXT: .LBB33_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_lshl_or_b32 v6, v6, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v10
-; GFX11-NEXT: v_lshl_or_b32 v10, v51, 16, v12
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v25, v25, 16, v23
-; GFX11-NEXT: v_lshl_or_b32 v12, v49, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v27, v27, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v9
-; GFX11-NEXT: v_lshl_or_b32 v14, v39, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v15, v38, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v35, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v26, v26, 16, v22
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_lshl_or_b32 v9, v52, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v13, v48, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v4
-; GFX11-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v28, v28, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v20, v33, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v25
-; GFX11-NEXT: v_lshl_or_b32 v24, v55, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v7, v54, 16, v8
-; GFX11-NEXT: v_lshl_or_b32 v8, v53, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v11, v50, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v16, v37, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v36, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v19, v34, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v21, v32, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v22, v31, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v23, v30, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v24
-; GFX11-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
-; GFX11-NEXT: v_mov_b32_e32 v4, v28
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB33_4:
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr6
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: s_branch .LBB33_2
+; GFX11-TRUE16-LABEL: bitcast_v24f32_to_v48f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-TRUE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-TRUE16-NEXT: .LBB33_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v24.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB33_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: s_branch .LBB33_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v24f32_to_v48f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s0 :: v_dual_mov_b32 v23, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s2 :: v_dual_mov_b32 v21, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s16 :: v_dual_mov_b32 v19, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s18 :: v_dual_mov_b32 v7, s20
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s19 :: v_dual_mov_b32 v13, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s24 :: v_dual_mov_b32 v9, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s26 :: v_dual_mov_b32 v14, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v24
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-FAKE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v14, 1.0, v14 :: v_dual_add_f32 v15, 1.0, v15
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v24
+; GFX11-FAKE16-NEXT: .LBB33_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v6, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v51, 16, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v49, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v27, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v29, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v39, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v38, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v35, 16, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v26, 16, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v52, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v48, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v33, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v25
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v55, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v54, 16, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v53, 16, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v37, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v36, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v34, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v32, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v31, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v30, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v24
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v28
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB33_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr6
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: s_branch .LBB33_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -19382,117 +20191,286 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v48f16_to_v24f32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:60
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v5 :: v_dual_mov_b32 v186, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v3 :: v_dual_mov_b32 v188, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v1 :: v_dual_mov_b32 v190, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB35_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-TRUE16-NEXT: .LBB35_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB35_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v20, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v190 :: v_dual_mov_b32 v19, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v187 :: v_dual_mov_b32 v22, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v23, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:312
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB35_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB35_2
;
; GFX11-FAKE16-LABEL: bitcast_v48f16_to_v24f32_scalar:
@@ -23764,117 +24742,286 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX11-TRUE16-LABEL: bitcast_v48i16_to_v12i64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:60
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v5 :: v_dual_mov_b32 v186, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v3 :: v_dual_mov_b32 v188, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v1 :: v_dual_mov_b32 v190, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB43_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-TRUE16-NEXT: .LBB43_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB43_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v20, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v190 :: v_dual_mov_b32 v19, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v187 :: v_dual_mov_b32 v22, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v23, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:312
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB43_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB43_2
;
; GFX11-FAKE16-LABEL: bitcast_v48i16_to_v12i64_scalar:
@@ -28015,117 +29162,286 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v48f16_to_v12i64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:60
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v5 :: v_dual_mov_b32 v186, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v3 :: v_dual_mov_b32 v188, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v1 :: v_dual_mov_b32 v190, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB47_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v20, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v190 :: v_dual_mov_b32 v19, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v187 :: v_dual_mov_b32 v22, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v23, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:312
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB47_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB47_2
;
; GFX11-FAKE16-LABEL: bitcast_v48f16_to_v12i64_scalar:
@@ -29551,166 +30867,317 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr34
; GFX9-NEXT: s_branch .LBB49_2
;
-; GFX11-LABEL: bitcast_v12f64_to_v48i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-NEXT: v_dual_mov_b32 v24, s0 :: v_dual_mov_b32 v25, s1
-; GFX11-NEXT: v_dual_mov_b32 v22, s2 :: v_dual_mov_b32 v23, s3
-; GFX11-NEXT: v_dual_mov_b32 v20, s16 :: v_dual_mov_b32 v21, s17
-; GFX11-NEXT: v_dual_mov_b32 v7, s18 :: v_dual_mov_b32 v8, s19
-; GFX11-NEXT: v_dual_mov_b32 v18, s20 :: v_dual_mov_b32 v19, s21
-; GFX11-NEXT: v_dual_mov_b32 v11, s22 :: v_dual_mov_b32 v12, s23
-; GFX11-NEXT: v_dual_mov_b32 v9, s24 :: v_dual_mov_b32 v10, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
-; GFX11-NEXT: .LBB49_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[9:10], v[9:10], 1.0
-; GFX11-NEXT: v_add_f64 v[11:12], v[11:12], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[7:8], v[7:8], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
-; GFX11-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v24
-; GFX11-NEXT: .LBB49_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v6, v6, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v7, v53, 16, v8
-; GFX11-NEXT: v_lshl_or_b32 v8, v52, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v25, v54, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v27, v27, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v9
-; GFX11-NEXT: v_lshl_or_b32 v9, v51, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v35, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v4
-; GFX11-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v28, v28, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v10
-; GFX11-NEXT: v_lshl_or_b32 v26, v26, 16, v22
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_lshl_or_b32 v13, v13, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_lshl_or_b32 v20, v33, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v25
-; GFX11-NEXT: v_lshl_or_b32 v24, v55, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v10, v50, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v11, v49, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v12, v48, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v14, v39, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v15, v38, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v16, v37, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v17, v36, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v19, v34, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v21, v32, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v22, v31, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v23, v30, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v24
-; GFX11-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
-; GFX11-NEXT: v_mov_b32_e32 v4, v28
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB49_4:
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr6
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr13
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: s_branch .LBB49_2
+; GFX11-TRUE16-LABEL: bitcast_v12f64_to_v48i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-TRUE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-TRUE16-NEXT: .LBB49_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v24.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB49_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: s_branch .LBB49_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v12f64_to_v48i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s0 :: v_dual_mov_b32 v25, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s2 :: v_dual_mov_b32 v23, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s16 :: v_dual_mov_b32 v21, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, s18 :: v_dual_mov_b32 v8, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s20 :: v_dual_mov_b32 v19, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, s22 :: v_dual_mov_b32 v12, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v9, s24 :: v_dual_mov_b32 v10, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v24
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-FAKE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[9:10], v[9:10], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[11:12], v[11:12], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[7:8], v[7:8], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v24
+; GFX11-FAKE16-NEXT: .LBB49_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v29, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v6, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v52, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v54, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v27, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v51, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v35, 16, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v28, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v26, 16, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v13, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v33, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v25
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v55, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v49, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v48, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v39, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v38, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v37, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v36, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v34, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v32, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v31, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v30, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v24
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v28
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB49_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr6
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr13
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: s_branch .LBB49_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -31492,117 +32959,286 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v48i16_to_v12f64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:60
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v5 :: v_dual_mov_b32 v186, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v3 :: v_dual_mov_b32 v188, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v1 :: v_dual_mov_b32 v190, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB51_3
; GFX11-TRUE16-NEXT: .LBB51_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB51_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v20, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v190 :: v_dual_mov_b32 v19, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v187 :: v_dual_mov_b32 v22, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v23, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:312
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB51_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB51_2
;
; GFX11-FAKE16-LABEL: bitcast_v48i16_to_v12f64_scalar:
@@ -33424,166 +35060,317 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; GFX9-NEXT: ; implicit-def: $vgpr34
; GFX9-NEXT: s_branch .LBB53_2
;
-; GFX11-LABEL: bitcast_v12f64_to_v48f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-NEXT: v_dual_mov_b32 v24, s0 :: v_dual_mov_b32 v25, s1
-; GFX11-NEXT: v_dual_mov_b32 v22, s2 :: v_dual_mov_b32 v23, s3
-; GFX11-NEXT: v_dual_mov_b32 v20, s16 :: v_dual_mov_b32 v21, s17
-; GFX11-NEXT: v_dual_mov_b32 v7, s18 :: v_dual_mov_b32 v8, s19
-; GFX11-NEXT: v_dual_mov_b32 v18, s20 :: v_dual_mov_b32 v19, s21
-; GFX11-NEXT: v_dual_mov_b32 v11, s22 :: v_dual_mov_b32 v12, s23
-; GFX11-NEXT: v_dual_mov_b32 v9, s24 :: v_dual_mov_b32 v10, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
-; GFX11-NEXT: .LBB53_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[9:10], v[9:10], 1.0
-; GFX11-NEXT: v_add_f64 v[11:12], v[11:12], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[7:8], v[7:8], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
-; GFX11-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v24
-; GFX11-NEXT: .LBB53_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v6, v6, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v7, v53, 16, v8
-; GFX11-NEXT: v_lshl_or_b32 v8, v52, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v25, v54, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v27, v27, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v9
-; GFX11-NEXT: v_lshl_or_b32 v9, v51, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v35, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v4
-; GFX11-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v28, v28, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v10
-; GFX11-NEXT: v_lshl_or_b32 v26, v26, 16, v22
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_lshl_or_b32 v13, v13, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_lshl_or_b32 v20, v33, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v25
-; GFX11-NEXT: v_lshl_or_b32 v24, v55, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v10, v50, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v11, v49, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v12, v48, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v14, v39, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v15, v38, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v16, v37, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v17, v36, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v19, v34, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v21, v32, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v22, v31, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v23, v30, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v24
-; GFX11-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
-; GFX11-NEXT: v_mov_b32_e32 v4, v28
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_4:
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr6
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr13
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: s_branch .LBB53_2
+; GFX11-TRUE16-LABEL: bitcast_v12f64_to_v48f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-TRUE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-TRUE16-NEXT: .LBB53_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v24.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB53_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: s_branch .LBB53_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v12f64_to_v48f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s0 :: v_dual_mov_b32 v25, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s2 :: v_dual_mov_b32 v23, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s16 :: v_dual_mov_b32 v21, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, s18 :: v_dual_mov_b32 v8, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s20 :: v_dual_mov_b32 v19, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, s22 :: v_dual_mov_b32 v12, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v9, s24 :: v_dual_mov_b32 v10, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v24
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-FAKE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[9:10], v[9:10], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[11:12], v[11:12], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[7:8], v[7:8], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v24
+; GFX11-FAKE16-NEXT: .LBB53_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v29, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v6, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v52, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v54, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v27, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v51, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v35, 16, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v28, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v26, 16, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v13, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v33, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v25
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v55, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v49, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v48, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v39, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v38, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v37, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v36, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v34, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v32, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v31, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v30, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v24
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v28
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB53_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr6
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr13
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: s_branch .LBB53_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -35639,117 +37426,286 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; GFX11-TRUE16-LABEL: bitcast_v48f16_to_v12f64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:60
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v5 :: v_dual_mov_b32 v186, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v3 :: v_dual_mov_b32 v188, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v1 :: v_dual_mov_b32 v190, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB55_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-TRUE16-NEXT: .LBB55_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB55_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v20, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v190 :: v_dual_mov_b32 v19, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v187 :: v_dual_mov_b32 v22, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v23, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:312
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB55_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB55_2
;
; GFX11-FAKE16-LABEL: bitcast_v48f16_to_v12f64_scalar:
@@ -37964,19 +39920,11 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v48i16_to_v48f16_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v23.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v23.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v23.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v23.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v23.h
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, 0
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
@@ -37998,22 +39946,21 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v23.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v22.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v21.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v20.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v19.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v18.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v24.h
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_4
; GFX11-TRUE16-NEXT: .LBB57_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v18, 16, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s43
@@ -38032,67 +39979,73 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s14
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s29, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s28, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s27, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s26, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s25, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s29, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s28, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s24, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s11, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v29, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v27
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v25
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v29
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v8
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v17
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
; GFX11-TRUE16-NEXT: s_branch .LBB57_5
; GFX11-TRUE16-NEXT: .LBB57_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
; GFX11-TRUE16-NEXT: s_branch .LBB57_2
; GFX11-TRUE16-NEXT: .LBB57_4:
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s29 :: v_dual_mov_b32 v15, s28
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s27 :: v_dual_mov_b32 v17, s26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s25 :: v_dual_mov_b32 v10, s24
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s22
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s21 :: v_dual_mov_b32 v6, s20
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s18
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s17 :: v_dual_mov_b32 v29, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, s3 :: v_dual_mov_b32 v25, s2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s1 :: v_dual_mov_b32 v27, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s45 :: v_dual_mov_b32 v31, s44
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s43 :: v_dual_mov_b32 v33, s42
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s41 :: v_dual_mov_b32 v35, s40
@@ -38103,58 +40056,43 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v52, s5 :: v_dual_mov_b32 v53, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s4 :: v_dual_mov_b32 v55, s14
; GFX11-TRUE16-NEXT: .LBB57_5: ; %end
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v24
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v26, 0xffff, v26
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v55, 16, v27
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v52, 16, v65
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v25
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v18, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v54, 16, v26
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v29, 0xffff, v29
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v28
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v22, 16, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v27
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v53, 16, v64
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v49, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v48, 16, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v39, 16, v53
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v51, 16, v29
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v50, 16, v52
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v38, 16, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v37, 16, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v36, 16, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v33, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v32, 16, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v31, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v30, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v19, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v20, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v21, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v23, 16, v4
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v24 :: v_dual_mov_b32 v1, v25
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, v28 :: v_dual_mov_b32 v5, v29
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v24.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: bitcast_v48i16_to_v48f16_scalar:
@@ -40168,19 +42106,11 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v48f16_to_v48i16_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v23.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v23.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v23.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v23.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v23.h
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, 0
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
@@ -40202,22 +42132,21 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v23.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v22.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v21.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v20.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v19.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v18.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v24.h
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_4
; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v18, 16, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s43
@@ -40236,67 +42165,73 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s14
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s29 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s28 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s27 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s26 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s25 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s29 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s28 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s24 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s11 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v29, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v27
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v25
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v29
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v8
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v17
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
; GFX11-TRUE16-NEXT: s_branch .LBB59_5
; GFX11-TRUE16-NEXT: .LBB59_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
; GFX11-TRUE16-NEXT: s_branch .LBB59_2
; GFX11-TRUE16-NEXT: .LBB59_4:
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s29 :: v_dual_mov_b32 v15, s28
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s27 :: v_dual_mov_b32 v17, s26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s25 :: v_dual_mov_b32 v10, s24
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s22
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s21 :: v_dual_mov_b32 v6, s20
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s18
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s17 :: v_dual_mov_b32 v29, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, s3 :: v_dual_mov_b32 v25, s2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s1 :: v_dual_mov_b32 v27, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s45 :: v_dual_mov_b32 v31, s44
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s43 :: v_dual_mov_b32 v33, s42
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s41 :: v_dual_mov_b32 v35, s40
@@ -40307,58 +42242,43 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v52, s5 :: v_dual_mov_b32 v53, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s4 :: v_dual_mov_b32 v55, s14
; GFX11-TRUE16-NEXT: .LBB59_5: ; %end
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v24
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v26, 0xffff, v26
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v55, 16, v27
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v52, 16, v65
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v25
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v18, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v54, 16, v26
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v29, 0xffff, v29
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v28
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v22, 16, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v27
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v53, 16, v64
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v49, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v48, 16, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v39, 16, v53
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v51, 16, v29
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v50, 16, v52
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v38, 16, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v37, 16, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v36, 16, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v33, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v32, 16, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v31, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v30, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v19, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v20, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v21, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v23, 16, v4
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v24 :: v_dual_mov_b32 v1, v25
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, v28 :: v_dual_mov_b32 v5, v29
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v24.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: bitcast_v48f16_to_v48i16_scalar:
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll
index 29005a4..07cdbef 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll
@@ -6286,129 +6286,295 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16-LABEL: bitcast_v52i16_to_v26i32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v186, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v5 :: v_dual_mov_b32 v188, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v3 :: v_dual_mov_b32 v190, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v191, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v185, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB15_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-TRUE16-NEXT: .LBB15_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v191, v191, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB15_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v191 :: v_dual_mov_b32 v20, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v189 :: v_dual_mov_b32 v22, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v187 :: v_dual_mov_b32 v24, v186
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB15_4:
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v53, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v25, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB15_2
;
; GFX11-FAKE16-LABEL: bitcast_v52i16_to_v26i32_scalar:
@@ -10946,129 +11112,295 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v52f16_to_v26i32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v186, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v5 :: v_dual_mov_b32 v188, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v3 :: v_dual_mov_b32 v190, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v191, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v185, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB19_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-TRUE16-NEXT: .LBB19_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v191, 0x200, v191 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB19_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v191 :: v_dual_mov_b32 v20, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v189 :: v_dual_mov_b32 v22, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v187 :: v_dual_mov_b32 v24, v186
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB19_4:
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v53, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v25, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB19_2
;
; GFX11-FAKE16-LABEL: bitcast_v52f16_to_v26i32_scalar:
@@ -14389,178 +14721,340 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr38
; GFX9-NEXT: s_branch .LBB29_2
;
-; GFX11-LABEL: bitcast_v26f32_to_v52i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-NEXT: v_dual_mov_b32 v26, s0 :: v_dual_mov_b32 v25, s1
-; GFX11-NEXT: v_dual_mov_b32 v24, s2 :: v_dual_mov_b32 v23, s3
-; GFX11-NEXT: v_dual_mov_b32 v22, s16 :: v_dual_mov_b32 v21, s17
-; GFX11-NEXT: v_dual_mov_b32 v20, s18 :: v_dual_mov_b32 v19, s19
-; GFX11-NEXT: v_dual_mov_b32 v18, s20 :: v_dual_mov_b32 v9, s22
-; GFX11-NEXT: v_dual_mov_b32 v10, s21 :: v_dual_mov_b32 v15, s23
-; GFX11-NEXT: v_dual_mov_b32 v14, s24 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s26 :: v_dual_mov_b32 v11, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
-; GFX11-NEXT: .LBB29_2: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
-; GFX11-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v26, 1.0, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: .LBB29_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v8, v8, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v33, v33, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v26, 0xffff, v26
-; GFX11-NEXT: v_lshl_or_b32 v27, v27, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v11, v64, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v12, v55, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v14, v53, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v15, v52, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v19, v48, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v16, v51, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v50, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v49, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v6
-; GFX11-NEXT: v_dual_mov_b32 v7, v31 :: v_dual_and_b32 v4, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v30, v30, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v20, v39, 16, v2
-; GFX11-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v2, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v28, v28, 16, v22
-; GFX11-NEXT: v_lshl_or_b32 v22, v37, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v33
-; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v26
-; GFX11-NEXT: v_lshl_or_b32 v26, v67, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v9, v66, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v10, v65, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v13, v54, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v21, v38, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v23, v36, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v24, v35, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v25, v34, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v32
-; GFX11-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
-; GFX11-NEXT: v_mov_b32_e32 v4, v28
-; GFX11-NEXT: v_mov_b32_e32 v6, v30
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB29_4:
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr8
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: s_branch .LBB29_2
+; GFX11-TRUE16-LABEL: bitcast_v26f32_to_v52i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-TRUE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-TRUE16-NEXT: .LBB29_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v26.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB29_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: s_branch .LBB29_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v26f32_to_v52i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s0 :: v_dual_mov_b32 v25, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s2 :: v_dual_mov_b32 v23, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s16 :: v_dual_mov_b32 v21, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s18 :: v_dual_mov_b32 v19, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s20 :: v_dual_mov_b32 v9, s22
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s21 :: v_dual_mov_b32 v15, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s26 :: v_dual_mov_b32 v11, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v26
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-FAKE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v26
+; GFX11-FAKE16-NEXT: .LBB29_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v31, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v8, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v33, v33, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xffff, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v27, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v29, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v64, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v55, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v53, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v52, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v48, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v51, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v50, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v49, 16, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, v31 :: v_dual_and_b32 v4, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v30, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v39, 16, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v2, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v28, 16, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v37, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v32, 16, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v66, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v65, 16, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v38, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v36, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v35, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v34, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v32
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v28
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v30
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB29_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr8
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: s_branch .LBB29_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -16527,129 +17021,295 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v52i16_to_v26f32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v186, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v5 :: v_dual_mov_b32 v188, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v3 :: v_dual_mov_b32 v190, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v191, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v185, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB31_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB31_3
; GFX11-TRUE16-NEXT: .LBB31_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v191, v191, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB31_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v191 :: v_dual_mov_b32 v20, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v189 :: v_dual_mov_b32 v22, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v187 :: v_dual_mov_b32 v24, v186
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB31_4:
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v53, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v25, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB31_2
;
; GFX11-FAKE16-LABEL: bitcast_v52i16_to_v26f32_scalar:
@@ -18769,178 +19429,340 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr38
; GFX9-NEXT: s_branch .LBB33_2
;
-; GFX11-LABEL: bitcast_v26f32_to_v52f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-NEXT: v_dual_mov_b32 v26, s0 :: v_dual_mov_b32 v25, s1
-; GFX11-NEXT: v_dual_mov_b32 v24, s2 :: v_dual_mov_b32 v23, s3
-; GFX11-NEXT: v_dual_mov_b32 v22, s16 :: v_dual_mov_b32 v21, s17
-; GFX11-NEXT: v_dual_mov_b32 v20, s18 :: v_dual_mov_b32 v19, s19
-; GFX11-NEXT: v_dual_mov_b32 v18, s20 :: v_dual_mov_b32 v9, s22
-; GFX11-NEXT: v_dual_mov_b32 v10, s21 :: v_dual_mov_b32 v15, s23
-; GFX11-NEXT: v_dual_mov_b32 v14, s24 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s26 :: v_dual_mov_b32 v11, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
-; GFX11-NEXT: .LBB33_2: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
-; GFX11-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v26, 1.0, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: .LBB33_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v8, v8, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v33, v33, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v26, 0xffff, v26
-; GFX11-NEXT: v_lshl_or_b32 v27, v27, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v11, v64, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v12, v55, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v14, v53, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v15, v52, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v19, v48, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v16, v51, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v50, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v49, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v6
-; GFX11-NEXT: v_dual_mov_b32 v7, v31 :: v_dual_and_b32 v4, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v30, v30, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v20, v39, 16, v2
-; GFX11-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v2, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v28, v28, 16, v22
-; GFX11-NEXT: v_lshl_or_b32 v22, v37, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v33
-; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v26
-; GFX11-NEXT: v_lshl_or_b32 v26, v67, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v9, v66, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v10, v65, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v13, v54, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v21, v38, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v23, v36, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v24, v35, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v25, v34, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v32
-; GFX11-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
-; GFX11-NEXT: v_mov_b32_e32 v4, v28
-; GFX11-NEXT: v_mov_b32_e32 v6, v30
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB33_4:
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr8
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: s_branch .LBB33_2
+; GFX11-TRUE16-LABEL: bitcast_v26f32_to_v52f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-TRUE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-TRUE16-NEXT: .LBB33_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v26.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB33_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: s_branch .LBB33_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v26f32_to_v52f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s0 :: v_dual_mov_b32 v25, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s2 :: v_dual_mov_b32 v23, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s16 :: v_dual_mov_b32 v21, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s18 :: v_dual_mov_b32 v19, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s20 :: v_dual_mov_b32 v9, s22
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s21 :: v_dual_mov_b32 v15, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s26 :: v_dual_mov_b32 v11, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v26
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-FAKE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v26
+; GFX11-FAKE16-NEXT: .LBB33_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v31, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v8, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v33, v33, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xffff, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v27, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v29, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v64, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v55, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v53, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v52, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v48, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v51, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v50, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v49, 16, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, v31 :: v_dual_and_b32 v4, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v30, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v39, 16, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v2, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v28, 16, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v37, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v32, 16, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v66, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v65, 16, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v38, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v36, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v35, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v34, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v32
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v28
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v30
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB33_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr8
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: s_branch .LBB33_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -21183,129 +22005,295 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v52f16_to_v26f32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v186, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v5 :: v_dual_mov_b32 v188, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v3 :: v_dual_mov_b32 v190, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v191, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v185, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB35_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-TRUE16-NEXT: .LBB35_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v191, 0x200, v191 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB35_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v191 :: v_dual_mov_b32 v20, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v189 :: v_dual_mov_b32 v22, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v187 :: v_dual_mov_b32 v24, v186
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB35_4:
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v53, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v25, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB35_2
;
; GFX11-FAKE16-LABEL: bitcast_v52f16_to_v26f32_scalar:
@@ -25980,129 +26968,295 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16-LABEL: bitcast_v52i16_to_v13i64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v186, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v5 :: v_dual_mov_b32 v188, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v3 :: v_dual_mov_b32 v190, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v191, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v185, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB43_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-TRUE16-NEXT: .LBB43_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v191, v191, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB43_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v191 :: v_dual_mov_b32 v20, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v189 :: v_dual_mov_b32 v22, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v187 :: v_dual_mov_b32 v24, v186
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB43_4:
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v53, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v25, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB43_2
;
; GFX11-FAKE16-LABEL: bitcast_v52i16_to_v13i64_scalar:
@@ -30655,129 +31809,295 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v52f16_to_v13i64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v186, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v5 :: v_dual_mov_b32 v188, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v3 :: v_dual_mov_b32 v190, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v191, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v185, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v191, 0x200, v191 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB47_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v191 :: v_dual_mov_b32 v20, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v189 :: v_dual_mov_b32 v22, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v187 :: v_dual_mov_b32 v24, v186
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB47_4:
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v53, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v25, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB47_2
;
; GFX11-FAKE16-LABEL: bitcast_v52f16_to_v13i64_scalar:
@@ -32378,178 +33698,340 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr25
; GFX9-NEXT: s_branch .LBB49_2
;
-; GFX11-LABEL: bitcast_v13f64_to_v52i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-NEXT: v_dual_mov_b32 v26, s0 :: v_dual_mov_b32 v27, s1
-; GFX11-NEXT: v_dual_mov_b32 v24, s2 :: v_dual_mov_b32 v25, s3
-; GFX11-NEXT: v_dual_mov_b32 v22, s16 :: v_dual_mov_b32 v23, s17
-; GFX11-NEXT: v_dual_mov_b32 v20, s18 :: v_dual_mov_b32 v21, s19
-; GFX11-NEXT: v_dual_mov_b32 v9, s20 :: v_dual_mov_b32 v10, s21
-; GFX11-NEXT: v_dual_mov_b32 v18, s22 :: v_dual_mov_b32 v19, s23
-; GFX11-NEXT: v_dual_mov_b32 v13, s24 :: v_dual_mov_b32 v14, s25
-; GFX11-NEXT: v_dual_mov_b32 v11, s26 :: v_dual_mov_b32 v12, s27
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v15, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
-; GFX11-NEXT: .LBB49_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[11:12], v[11:12], 1.0
-; GFX11-NEXT: v_add_f64 v[13:14], v[13:14], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[9:10], v[9:10], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
-; GFX11-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
-; GFX11-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v15, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: .LBB49_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v8, v8, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v9, v65, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v10, v64, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v33, v33, 16, v27
-; GFX11-NEXT: v_and_b32_e32 v26, 0xffff, v26
-; GFX11-NEXT: v_lshl_or_b32 v27, v66, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_lshl_or_b32 v30, v30, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v12
-; GFX11-NEXT: v_lshl_or_b32 v12, v54, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v13, v53, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v14, v52, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v19, v48, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v11, v55, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v15, v15, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_lshl_or_b32 v18, v49, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v20, v39, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v6
-; GFX11-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v28, v28, 16, v22
-; GFX11-NEXT: v_lshl_or_b32 v22, v37, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v33
-; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v26
-; GFX11-NEXT: v_lshl_or_b32 v26, v67, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v16, v51, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v17, v50, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v21, v38, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v23, v36, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v24, v35, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v25, v34, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v32
-; GFX11-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
-; GFX11-NEXT: v_mov_b32_e32 v4, v28
-; GFX11-NEXT: v_dual_mov_b32 v6, v30 :: v_dual_mov_b32 v7, v31
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB49_4:
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr8
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr15
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: s_branch .LBB49_2
+; GFX11-TRUE16-LABEL: bitcast_v13f64_to_v52i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-TRUE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-TRUE16-NEXT: .LBB49_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v26.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB49_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: s_branch .LBB49_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v13f64_to_v52i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s0 :: v_dual_mov_b32 v27, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s2 :: v_dual_mov_b32 v25, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s16 :: v_dual_mov_b32 v23, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s18 :: v_dual_mov_b32 v21, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v9, s20 :: v_dual_mov_b32 v10, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s22 :: v_dual_mov_b32 v19, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v13, s24 :: v_dual_mov_b32 v14, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, s26 :: v_dual_mov_b32 v12, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v26
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-FAKE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[11:12], v[11:12], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[13:14], v[13:14], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[9:10], v[9:10], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v26
+; GFX11-FAKE16-NEXT: .LBB49_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v31, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v8, 16, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v64, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v33, v33, 16, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xffff, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v66, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v29, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v30, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v52, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v48, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v55, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v15, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v39, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v28, 16, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v37, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v32, 16, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v38, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v36, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v35, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v34, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v32
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v28
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v30 :: v_dual_mov_b32 v7, v31
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB49_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr8
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr15
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: s_branch .LBB49_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -34516,129 +35998,295 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v52i16_to_v13f64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v186, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v5 :: v_dual_mov_b32 v188, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v3 :: v_dual_mov_b32 v190, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v191, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v185, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB51_3
; GFX11-TRUE16-NEXT: .LBB51_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v191, v191, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB51_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v191 :: v_dual_mov_b32 v20, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v189 :: v_dual_mov_b32 v22, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v187 :: v_dual_mov_b32 v24, v186
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB51_4:
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v53, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v25, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB51_2
;
; GFX11-FAKE16-LABEL: bitcast_v52i16_to_v13f64_scalar:
@@ -36667,178 +38315,340 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; GFX9-NEXT: ; implicit-def: $vgpr25
; GFX9-NEXT: s_branch .LBB53_2
;
-; GFX11-LABEL: bitcast_v13f64_to_v52f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-NEXT: v_dual_mov_b32 v26, s0 :: v_dual_mov_b32 v27, s1
-; GFX11-NEXT: v_dual_mov_b32 v24, s2 :: v_dual_mov_b32 v25, s3
-; GFX11-NEXT: v_dual_mov_b32 v22, s16 :: v_dual_mov_b32 v23, s17
-; GFX11-NEXT: v_dual_mov_b32 v20, s18 :: v_dual_mov_b32 v21, s19
-; GFX11-NEXT: v_dual_mov_b32 v9, s20 :: v_dual_mov_b32 v10, s21
-; GFX11-NEXT: v_dual_mov_b32 v18, s22 :: v_dual_mov_b32 v19, s23
-; GFX11-NEXT: v_dual_mov_b32 v13, s24 :: v_dual_mov_b32 v14, s25
-; GFX11-NEXT: v_dual_mov_b32 v11, s26 :: v_dual_mov_b32 v12, s27
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v15, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
-; GFX11-NEXT: .LBB53_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[11:12], v[11:12], 1.0
-; GFX11-NEXT: v_add_f64 v[13:14], v[13:14], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[9:10], v[9:10], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
-; GFX11-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
-; GFX11-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v15, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: .LBB53_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v8, v8, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v9, v65, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v10, v64, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v33, v33, 16, v27
-; GFX11-NEXT: v_and_b32_e32 v26, 0xffff, v26
-; GFX11-NEXT: v_lshl_or_b32 v27, v66, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_lshl_or_b32 v30, v30, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v12
-; GFX11-NEXT: v_lshl_or_b32 v12, v54, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v13, v53, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v14, v52, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v19, v48, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v11, v55, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v15, v15, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_lshl_or_b32 v18, v49, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v20, v39, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v6
-; GFX11-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v28, v28, 16, v22
-; GFX11-NEXT: v_lshl_or_b32 v22, v37, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v33
-; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v26
-; GFX11-NEXT: v_lshl_or_b32 v26, v67, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v16, v51, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v17, v50, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v21, v38, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v23, v36, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v24, v35, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v25, v34, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v32
-; GFX11-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
-; GFX11-NEXT: v_mov_b32_e32 v4, v28
-; GFX11-NEXT: v_dual_mov_b32 v6, v30 :: v_dual_mov_b32 v7, v31
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_4:
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr8
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr15
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: s_branch .LBB53_2
+; GFX11-TRUE16-LABEL: bitcast_v13f64_to_v52f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-TRUE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-TRUE16-NEXT: .LBB53_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v26.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB53_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: s_branch .LBB53_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v13f64_to_v52f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s0 :: v_dual_mov_b32 v27, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s2 :: v_dual_mov_b32 v25, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s16 :: v_dual_mov_b32 v23, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s18 :: v_dual_mov_b32 v21, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v9, s20 :: v_dual_mov_b32 v10, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s22 :: v_dual_mov_b32 v19, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v13, s24 :: v_dual_mov_b32 v14, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, s26 :: v_dual_mov_b32 v12, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v26
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-FAKE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[11:12], v[11:12], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[13:14], v[13:14], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[9:10], v[9:10], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v26
+; GFX11-FAKE16-NEXT: .LBB53_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v31, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v8, 16, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v64, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v33, v33, 16, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xffff, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v66, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v29, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v30, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v52, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v48, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v55, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v15, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v39, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v28, 16, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v37, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v32, 16, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v38, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v36, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v35, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v34, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v32
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v28
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v30 :: v_dual_mov_b32 v7, v31
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB53_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr8
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr15
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: s_branch .LBB53_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -39081,129 +40891,295 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX11-TRUE16-LABEL: bitcast_v52f16_to_v13f64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v186, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v5 :: v_dual_mov_b32 v188, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v3 :: v_dual_mov_b32 v190, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v191, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v185, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB55_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-TRUE16-NEXT: .LBB55_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v191, 0x200, v191 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB55_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v191 :: v_dual_mov_b32 v20, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v189 :: v_dual_mov_b32 v22, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v187 :: v_dual_mov_b32 v24, v186
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB55_4:
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v53, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v25, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB55_2
;
; GFX11-FAKE16-LABEL: bitcast_v52f16_to_v13f64_scalar:
@@ -41806,23 +43782,12 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v52i16_to_v52f16_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v25.h
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v24, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, 0
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
@@ -41844,26 +43809,25 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v25.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v23.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v22.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v21.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v20.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v19.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v18.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v26.h
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_4
; GFX11-TRUE16-NEXT: .LBB57_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v25, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v24, 16, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v18, 16, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s43
@@ -41882,71 +43846,79 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s41
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s29, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s28, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s27, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s26, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s29, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s28, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s13, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v30, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v31, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v33, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v32, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v29, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v33
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v32
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v29
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v27
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v31
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v10
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v17
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
; GFX11-TRUE16-NEXT: s_branch .LBB57_5
; GFX11-TRUE16-NEXT: .LBB57_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
; GFX11-TRUE16-NEXT: s_branch .LBB57_2
; GFX11-TRUE16-NEXT: .LBB57_4:
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s29 :: v_dual_mov_b32 v17, s28
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s27 :: v_dual_mov_b32 v12, s26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s24
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s23 :: v_dual_mov_b32 v8, s22
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s20
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s19 :: v_dual_mov_b32 v31, s18
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s17 :: v_dual_mov_b32 v27, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s3 :: v_dual_mov_b32 v29, s2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s1 :: v_dual_mov_b32 v33, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s45 :: v_dual_mov_b32 v35, s44
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v36, s43 :: v_dual_mov_b32 v37, s42
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s40 :: v_dual_mov_b32 v39, s15
@@ -41957,62 +43929,46 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, s5 :: v_dual_mov_b32 v65, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v66, s4 :: v_dual_mov_b32 v67, s41
; GFX11-TRUE16-NEXT: .LBB57_5: ; %end
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v33
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v68, 0xffff, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v29, 0xffff, v29
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v28, 0xffff, v28
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v69, 0xffff, v27
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v32, v67, 16, v33
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v26
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v65, 16, v29
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v54, 16, v67
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v30
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v64, 16, v28
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v55, 16, v69
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v30, v53, 16, v31
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v50, 16, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v2, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v52, 16, v54
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v33, v66, 16, v68
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v48, 16, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v31 :: v_dual_and_b32 v4, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v51, 16, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v49, 16, v55
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v39, 16, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v38, 16, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v35, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v34, 16, v36
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v37, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v21, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v22, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v23, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v24, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v25, 16, v4
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v32 :: v_dual_mov_b32 v1, v33
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v28
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v30
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v26.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: bitcast_v52i16_to_v52f16_scalar:
@@ -44258,23 +46214,12 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v52f16_to_v52i16_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v25.h
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v24, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, 0
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
@@ -44296,26 +46241,25 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v25.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v23.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v22.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v21.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v20.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v19.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v18.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v26.h
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_4
; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v25, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v24, 16, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v18, 16, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s43
@@ -44334,71 +46278,79 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s41
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s29 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s28 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s27 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s26 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s29 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s28 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s13 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v30, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v31, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v33, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v32, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v29, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v33
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v32
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v29
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v27
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v31
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v10
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v17
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
; GFX11-TRUE16-NEXT: s_branch .LBB59_5
; GFX11-TRUE16-NEXT: .LBB59_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
; GFX11-TRUE16-NEXT: s_branch .LBB59_2
; GFX11-TRUE16-NEXT: .LBB59_4:
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s29 :: v_dual_mov_b32 v17, s28
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s27 :: v_dual_mov_b32 v12, s26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s24
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s23 :: v_dual_mov_b32 v8, s22
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s20
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s19 :: v_dual_mov_b32 v31, s18
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s17 :: v_dual_mov_b32 v27, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s3 :: v_dual_mov_b32 v29, s2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s1 :: v_dual_mov_b32 v33, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s45 :: v_dual_mov_b32 v35, s44
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v36, s43 :: v_dual_mov_b32 v37, s42
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s40 :: v_dual_mov_b32 v39, s15
@@ -44409,62 +46361,46 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, s5 :: v_dual_mov_b32 v65, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v66, s4 :: v_dual_mov_b32 v67, s41
; GFX11-TRUE16-NEXT: .LBB59_5: ; %end
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v33
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v68, 0xffff, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v29, 0xffff, v29
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v28, 0xffff, v28
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v69, 0xffff, v27
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v32, v67, 16, v33
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v26
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v65, 16, v29
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v54, 16, v67
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v30
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v64, 16, v28
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v55, 16, v69
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v30, v53, 16, v31
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v50, 16, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v2, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v52, 16, v54
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v33, v66, 16, v68
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v48, 16, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v31 :: v_dual_and_b32 v4, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v51, 16, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v49, 16, v55
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v39, 16, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v38, 16, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v35, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v34, 16, v36
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v37, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v21, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v22, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v23, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v24, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v25, 16, v4
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v32 :: v_dual_mov_b32 v1, v33
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v28
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v30
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v26.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: bitcast_v52f16_to_v52i16_scalar:
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll
index 8ee5b96..8eb71e9 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll
@@ -6779,141 +6779,299 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16-LABEL: bitcast_v56i16_to_v28i32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v189, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v6 :: v_dual_mov_b32 v191, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v4 :: v_dual_mov_b32 v185, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v1 :: v_dual_mov_b32 v187, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB15_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-TRUE16-NEXT: .LBB15_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v191, v191, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB15_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v186 :: v_dual_mov_b32 v20, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v191 :: v_dual_mov_b32 v22, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v189 :: v_dual_mov_b32 v24, v188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v28
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB15_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v28 :: v_dual_mov_b32 v53, v26
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v28, v64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB15_2
;
; GFX11-FAKE16-LABEL: bitcast_v56i16_to_v28i32_scalar:
@@ -11885,141 +12043,299 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v56f16_to_v28i32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v189, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v6 :: v_dual_mov_b32 v191, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v4 :: v_dual_mov_b32 v185, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v1 :: v_dual_mov_b32 v187, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB19_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-TRUE16-NEXT: .LBB19_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v191, 0x200, v191 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB19_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v186 :: v_dual_mov_b32 v20, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v191 :: v_dual_mov_b32 v22, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v189 :: v_dual_mov_b32 v24, v188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v28
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB19_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v28 :: v_dual_mov_b32 v53, v26
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v28, v64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB19_2
;
; GFX11-FAKE16-LABEL: bitcast_v56f16_to_v28i32_scalar:
@@ -15595,191 +15911,364 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr50
; GFX9-NEXT: s_branch .LBB29_2
;
-; GFX11-LABEL: bitcast_v28f32_to_v56i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-NEXT: v_dual_mov_b32 v28, s0 :: v_dual_mov_b32 v27, s1
-; GFX11-NEXT: v_dual_mov_b32 v26, s2 :: v_dual_mov_b32 v25, s3
-; GFX11-NEXT: v_dual_mov_b32 v24, s16 :: v_dual_mov_b32 v23, s17
-; GFX11-NEXT: v_dual_mov_b32 v22, s18 :: v_dual_mov_b32 v21, s19
-; GFX11-NEXT: v_dual_mov_b32 v20, s20 :: v_dual_mov_b32 v19, s21
-; GFX11-NEXT: v_dual_mov_b32 v18, s22 :: v_dual_mov_b32 v11, s24
-; GFX11-NEXT: v_dual_mov_b32 v12, s23 :: v_dual_mov_b32 v15, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v13, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v28
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
-; GFX11-NEXT: .LBB29_2: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
-; GFX11-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v26, 1.0, v26
-; GFX11-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v28, 1.0, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v28
-; GFX11-NEXT: .LBB29_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v27
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v28
-; GFX11-NEXT: v_lshl_or_b32 v33, v33, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v26, 0xffff, v26
-; GFX11-NEXT: v_lshl_or_b32 v35, v35, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v37, v37, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v11
-; GFX11-NEXT: v_lshl_or_b32 v10, v10, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v16, v65, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v64, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v19, v54, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v26
-; GFX11-NEXT: v_lshl_or_b32 v11, v70, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v12, v69, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_lshl_or_b32 v13, v68, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v15, v66, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v55, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v21, v52, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v8
-; GFX11-NEXT: v_mov_b32_e32 v5, v35
-; GFX11-NEXT: v_lshl_or_b32 v34, v34, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v24, v49, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v31
-; GFX11-NEXT: v_lshl_or_b32 v30, v30, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v28, v71, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v20, v53, 16, v2
-; GFX11-NEXT: v_dual_mov_b32 v7, v37 :: v_dual_and_b32 v2, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v36, v36, 16, v22
-; GFX11-NEXT: v_lshl_or_b32 v22, v51, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v9
-; GFX11-NEXT: v_lshl_or_b32 v14, v67, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v23, v50, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v25, v48, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v26, v39, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v27, v38, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v30
-; GFX11-NEXT: v_dual_mov_b32 v2, v32 :: v_dual_mov_b32 v3, v33
-; GFX11-NEXT: v_mov_b32_e32 v4, v34
-; GFX11-NEXT: v_mov_b32_e32 v6, v36
-; GFX11-NEXT: v_dual_mov_b32 v8, v28 :: v_dual_mov_b32 v9, v29
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB29_4:
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr10
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: s_branch .LBB29_2
+; GFX11-TRUE16-LABEL: bitcast_v28f32_to_v56i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-TRUE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-TRUE16-NEXT: .LBB29_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v71.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v70.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v69.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v68.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v28.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB29_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: s_branch .LBB29_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v28f32_to_v56i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s0 :: v_dual_mov_b32 v27, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s2 :: v_dual_mov_b32 v25, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s16 :: v_dual_mov_b32 v23, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s18 :: v_dual_mov_b32 v21, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s20 :: v_dual_mov_b32 v19, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s22 :: v_dual_mov_b32 v11, s24
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s23 :: v_dual_mov_b32 v15, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v13, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v28
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-FAKE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v28, 1.0, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v28
+; GFX11-FAKE16-NEXT: .LBB29_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v29, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v31, 16, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v33, v33, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xffff, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v35, v35, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v37, v37, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v10, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v65, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v64, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v54, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v32, 16, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v70, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v69, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v68, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v66, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v55, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v52, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v35
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v34, v34, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v49, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v30, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v71, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v53, 16, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, v37 :: v_dual_and_b32 v2, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v36, v36, 16, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v51, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v67, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v50, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v48, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v39, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v38, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v30
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v32 :: v_dual_mov_b32 v3, v33
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v34
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v36
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, v28 :: v_dual_mov_b32 v9, v29
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB29_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr10
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: s_branch .LBB29_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -17915,141 +18404,299 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v56i16_to_v28f32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v189, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v6 :: v_dual_mov_b32 v191, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v4 :: v_dual_mov_b32 v185, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v1 :: v_dual_mov_b32 v187, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB31_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB31_3
; GFX11-TRUE16-NEXT: .LBB31_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v191, v191, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB31_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v186 :: v_dual_mov_b32 v20, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v191 :: v_dual_mov_b32 v22, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v189 :: v_dual_mov_b32 v24, v188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v28
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB31_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v28 :: v_dual_mov_b32 v53, v26
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v28, v64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB31_2
;
; GFX11-FAKE16-LABEL: bitcast_v56i16_to_v28f32_scalar:
@@ -20379,191 +21026,364 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr50
; GFX9-NEXT: s_branch .LBB33_2
;
-; GFX11-LABEL: bitcast_v28f32_to_v56f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-NEXT: v_dual_mov_b32 v28, s0 :: v_dual_mov_b32 v27, s1
-; GFX11-NEXT: v_dual_mov_b32 v26, s2 :: v_dual_mov_b32 v25, s3
-; GFX11-NEXT: v_dual_mov_b32 v24, s16 :: v_dual_mov_b32 v23, s17
-; GFX11-NEXT: v_dual_mov_b32 v22, s18 :: v_dual_mov_b32 v21, s19
-; GFX11-NEXT: v_dual_mov_b32 v20, s20 :: v_dual_mov_b32 v19, s21
-; GFX11-NEXT: v_dual_mov_b32 v18, s22 :: v_dual_mov_b32 v11, s24
-; GFX11-NEXT: v_dual_mov_b32 v12, s23 :: v_dual_mov_b32 v15, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v13, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v28
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
-; GFX11-NEXT: .LBB33_2: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
-; GFX11-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v26, 1.0, v26
-; GFX11-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v28, 1.0, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v28
-; GFX11-NEXT: .LBB33_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v27
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v28
-; GFX11-NEXT: v_lshl_or_b32 v33, v33, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v26, 0xffff, v26
-; GFX11-NEXT: v_lshl_or_b32 v35, v35, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v37, v37, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v11
-; GFX11-NEXT: v_lshl_or_b32 v10, v10, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v16, v65, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v64, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v19, v54, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v26
-; GFX11-NEXT: v_lshl_or_b32 v11, v70, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v12, v69, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_lshl_or_b32 v13, v68, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v15, v66, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v55, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v21, v52, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v8
-; GFX11-NEXT: v_mov_b32_e32 v5, v35
-; GFX11-NEXT: v_lshl_or_b32 v34, v34, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v24, v49, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v31
-; GFX11-NEXT: v_lshl_or_b32 v30, v30, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v28, v71, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v20, v53, 16, v2
-; GFX11-NEXT: v_dual_mov_b32 v7, v37 :: v_dual_and_b32 v2, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v36, v36, 16, v22
-; GFX11-NEXT: v_lshl_or_b32 v22, v51, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v9
-; GFX11-NEXT: v_lshl_or_b32 v14, v67, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v23, v50, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v25, v48, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v26, v39, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v27, v38, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v30
-; GFX11-NEXT: v_dual_mov_b32 v2, v32 :: v_dual_mov_b32 v3, v33
-; GFX11-NEXT: v_mov_b32_e32 v4, v34
-; GFX11-NEXT: v_mov_b32_e32 v6, v36
-; GFX11-NEXT: v_dual_mov_b32 v8, v28 :: v_dual_mov_b32 v9, v29
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB33_4:
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr10
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: s_branch .LBB33_2
+; GFX11-TRUE16-LABEL: bitcast_v28f32_to_v56f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-TRUE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-TRUE16-NEXT: .LBB33_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v71.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v70.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v69.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v68.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v28.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB33_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: s_branch .LBB33_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v28f32_to_v56f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s0 :: v_dual_mov_b32 v27, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s2 :: v_dual_mov_b32 v25, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s16 :: v_dual_mov_b32 v23, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s18 :: v_dual_mov_b32 v21, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s20 :: v_dual_mov_b32 v19, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s22 :: v_dual_mov_b32 v11, s24
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s23 :: v_dual_mov_b32 v15, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v13, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v28
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-FAKE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v28, 1.0, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v28
+; GFX11-FAKE16-NEXT: .LBB33_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v29, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v31, 16, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v33, v33, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xffff, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v35, v35, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v37, v37, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v10, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v65, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v64, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v54, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v32, 16, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v70, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v69, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v68, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v66, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v55, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v52, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v35
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v34, v34, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v49, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v30, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v71, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v53, 16, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, v37 :: v_dual_and_b32 v2, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v36, v36, 16, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v51, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v67, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v50, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v48, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v39, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v38, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v30
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v32 :: v_dual_mov_b32 v3, v33
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v34
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v36
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, v28 :: v_dual_mov_b32 v9, v29
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB33_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr10
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: s_branch .LBB33_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -23006,141 +23826,299 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v56f16_to_v28f32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v189, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v6 :: v_dual_mov_b32 v191, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v4 :: v_dual_mov_b32 v185, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v1 :: v_dual_mov_b32 v187, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB35_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-TRUE16-NEXT: .LBB35_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v191, 0x200, v191 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB35_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v186 :: v_dual_mov_b32 v20, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v191 :: v_dual_mov_b32 v22, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v189 :: v_dual_mov_b32 v24, v188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v28
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB35_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v28 :: v_dual_mov_b32 v53, v26
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v28, v64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB35_2
;
; GFX11-FAKE16-LABEL: bitcast_v56f16_to_v28f32_scalar:
@@ -28216,141 +29194,299 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16-LABEL: bitcast_v56i16_to_v14i64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v189, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v6 :: v_dual_mov_b32 v191, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v4 :: v_dual_mov_b32 v185, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v1 :: v_dual_mov_b32 v187, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB43_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-TRUE16-NEXT: .LBB43_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v191, v191, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB43_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v186 :: v_dual_mov_b32 v20, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v191 :: v_dual_mov_b32 v22, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v189 :: v_dual_mov_b32 v24, v188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v28
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB43_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v28 :: v_dual_mov_b32 v53, v26
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v28, v64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB43_2
;
; GFX11-FAKE16-LABEL: bitcast_v56i16_to_v14i64_scalar:
@@ -33336,141 +34472,299 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v56f16_to_v14i64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v189, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v6 :: v_dual_mov_b32 v191, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v4 :: v_dual_mov_b32 v185, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v1 :: v_dual_mov_b32 v187, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v191, 0x200, v191 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB47_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v186 :: v_dual_mov_b32 v20, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v191 :: v_dual_mov_b32 v22, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v189 :: v_dual_mov_b32 v24, v188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v28
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB47_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v28 :: v_dual_mov_b32 v53, v26
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v28, v64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB47_2
;
; GFX11-FAKE16-LABEL: bitcast_v56f16_to_v14i64_scalar:
@@ -35225,191 +36519,364 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr27
; GFX9-NEXT: s_branch .LBB49_2
;
-; GFX11-LABEL: bitcast_v14f64_to_v56i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-NEXT: v_dual_mov_b32 v27, s0 :: v_dual_mov_b32 v28, s1
-; GFX11-NEXT: v_dual_mov_b32 v25, s2 :: v_dual_mov_b32 v26, s3
-; GFX11-NEXT: v_dual_mov_b32 v23, s16 :: v_dual_mov_b32 v24, s17
-; GFX11-NEXT: v_dual_mov_b32 v21, s18 :: v_dual_mov_b32 v22, s19
-; GFX11-NEXT: v_dual_mov_b32 v19, s20 :: v_dual_mov_b32 v20, s21
-; GFX11-NEXT: v_dual_mov_b32 v11, s22 :: v_dual_mov_b32 v12, s23
-; GFX11-NEXT: v_dual_mov_b32 v17, s24 :: v_dual_mov_b32 v18, s25
-; GFX11-NEXT: v_dual_mov_b32 v13, s26 :: v_dual_mov_b32 v14, s27
-; GFX11-NEXT: v_dual_mov_b32 v15, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v27
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
-; GFX11-NEXT: .LBB49_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
-; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_add_f64 v[15:16], v[15:16], 1.0
-; GFX11-NEXT: v_add_f64 v[13:14], v[13:14], 1.0
-; GFX11-NEXT: v_add_f64 v[17:18], v[17:18], 1.0
-; GFX11-NEXT: v_add_f64 v[11:12], v[11:12], 1.0
-; GFX11-NEXT: v_add_f64 v[19:20], v[19:20], 1.0
-; GFX11-NEXT: v_add_f64 v[21:22], v[21:22], 1.0
-; GFX11-NEXT: v_add_f64 v[23:24], v[23:24], 1.0
-; GFX11-NEXT: v_add_f64 v[25:26], v[25:26], 1.0
-; GFX11-NEXT: v_add_f64 v[27:28], v[27:28], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v27
-; GFX11-NEXT: .LBB49_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v28
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v37, v37, 16, v22
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v28, v71, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v14
-; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_lshl_or_b32 v36, v36, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v25
-; GFX11-NEXT: v_lshl_or_b32 v10, v10, 16, v11
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v16
-; GFX11-NEXT: v_lshl_or_b32 v15, v66, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v19, v54, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v22, v51, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v6
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v9
-; GFX11-NEXT: v_mov_b32_e32 v6, v36
-; GFX11-NEXT: v_lshl_or_b32 v34, v34, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v26
-; GFX11-NEXT: v_mov_b32_e32 v9, v29
-; GFX11-NEXT: v_lshl_or_b32 v11, v70, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v12, v69, 16, v17
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v18
-; GFX11-NEXT: v_lshl_or_b32 v30, v30, 16, v27
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v13, v68, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v64, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v21, v52, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v8
-; GFX11-NEXT: v_lshl_or_b32 v27, v38, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v34
-; GFX11-NEXT: v_lshl_or_b32 v33, v33, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v14, v67, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v16, v65, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v18, v55, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v20, v53, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v26, v39, 16, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v33
-; GFX11-NEXT: v_lshl_or_b32 v35, v35, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v23, v50, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v24, v49, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v25, v48, 16, v2
-; GFX11-NEXT: v_dual_mov_b32 v0, v30 :: v_dual_mov_b32 v1, v31
-; GFX11-NEXT: v_dual_mov_b32 v2, v32 :: v_dual_mov_b32 v5, v35
-; GFX11-NEXT: v_dual_mov_b32 v7, v37 :: v_dual_mov_b32 v8, v28
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB49_4:
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr10
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: s_branch .LBB49_2
+; GFX11-TRUE16-LABEL: bitcast_v14f64_to_v56i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-TRUE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-TRUE16-NEXT: .LBB49_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v71.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v70.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v69.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v68.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v28.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB49_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: s_branch .LBB49_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v14f64_to_v56i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, s0 :: v_dual_mov_b32 v28, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, s2 :: v_dual_mov_b32 v26, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v23, s16 :: v_dual_mov_b32 v24, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v21, s18 :: v_dual_mov_b32 v22, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v19, s20 :: v_dual_mov_b32 v20, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, s22 :: v_dual_mov_b32 v12, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s24 :: v_dual_mov_b32 v18, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v13, s26 :: v_dual_mov_b32 v14, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s28 :: v_dual_mov_b32 v16, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v27
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-FAKE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[15:16], v[15:16], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[13:14], v[13:14], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[17:18], v[17:18], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[11:12], v[11:12], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[19:20], v[19:20], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[21:22], v[21:22], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[23:24], v[23:24], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[25:26], v[25:26], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[27:28], v[27:28], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v27
+; GFX11-FAKE16-NEXT: .LBB49_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v28
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v37, v37, 16, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v71, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v29, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v36, v36, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v32, 16, v25
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v10, 16, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v66, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v51, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v34, v34, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v26
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v9, v29
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v70, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v69, 16, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v30, 16, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v68, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v64, 16, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v52, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v38, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v34
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v33, v33, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v67, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v65, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v55, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v53, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v39, 16, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v35, v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v50, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v49, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v48, 16, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v30 :: v_dual_mov_b32 v1, v31
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v32 :: v_dual_mov_b32 v5, v35
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, v37 :: v_dual_mov_b32 v8, v28
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB49_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr10
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: s_branch .LBB49_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -37545,141 +39012,299 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v56i16_to_v14f64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v189, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v6 :: v_dual_mov_b32 v191, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v4 :: v_dual_mov_b32 v185, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v1 :: v_dual_mov_b32 v187, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB51_3
; GFX11-TRUE16-NEXT: .LBB51_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v191, v191, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB51_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v186 :: v_dual_mov_b32 v20, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v191 :: v_dual_mov_b32 v22, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v189 :: v_dual_mov_b32 v24, v188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v28
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB51_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v28 :: v_dual_mov_b32 v53, v26
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v28, v64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB51_2
;
; GFX11-FAKE16-LABEL: bitcast_v56i16_to_v14f64_scalar:
@@ -39918,191 +41543,364 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; GFX9-NEXT: ; implicit-def: $vgpr27
; GFX9-NEXT: s_branch .LBB53_2
;
-; GFX11-LABEL: bitcast_v14f64_to_v56f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-NEXT: v_dual_mov_b32 v27, s0 :: v_dual_mov_b32 v28, s1
-; GFX11-NEXT: v_dual_mov_b32 v25, s2 :: v_dual_mov_b32 v26, s3
-; GFX11-NEXT: v_dual_mov_b32 v23, s16 :: v_dual_mov_b32 v24, s17
-; GFX11-NEXT: v_dual_mov_b32 v21, s18 :: v_dual_mov_b32 v22, s19
-; GFX11-NEXT: v_dual_mov_b32 v19, s20 :: v_dual_mov_b32 v20, s21
-; GFX11-NEXT: v_dual_mov_b32 v11, s22 :: v_dual_mov_b32 v12, s23
-; GFX11-NEXT: v_dual_mov_b32 v17, s24 :: v_dual_mov_b32 v18, s25
-; GFX11-NEXT: v_dual_mov_b32 v13, s26 :: v_dual_mov_b32 v14, s27
-; GFX11-NEXT: v_dual_mov_b32 v15, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v27
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
-; GFX11-NEXT: .LBB53_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
-; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_add_f64 v[15:16], v[15:16], 1.0
-; GFX11-NEXT: v_add_f64 v[13:14], v[13:14], 1.0
-; GFX11-NEXT: v_add_f64 v[17:18], v[17:18], 1.0
-; GFX11-NEXT: v_add_f64 v[11:12], v[11:12], 1.0
-; GFX11-NEXT: v_add_f64 v[19:20], v[19:20], 1.0
-; GFX11-NEXT: v_add_f64 v[21:22], v[21:22], 1.0
-; GFX11-NEXT: v_add_f64 v[23:24], v[23:24], 1.0
-; GFX11-NEXT: v_add_f64 v[25:26], v[25:26], 1.0
-; GFX11-NEXT: v_add_f64 v[27:28], v[27:28], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v27
-; GFX11-NEXT: .LBB53_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v28
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v37, v37, 16, v22
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v28, v71, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v14
-; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_lshl_or_b32 v36, v36, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v25
-; GFX11-NEXT: v_lshl_or_b32 v10, v10, 16, v11
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v16
-; GFX11-NEXT: v_lshl_or_b32 v15, v66, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v19, v54, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v22, v51, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v6
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v9
-; GFX11-NEXT: v_mov_b32_e32 v6, v36
-; GFX11-NEXT: v_lshl_or_b32 v34, v34, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v26
-; GFX11-NEXT: v_mov_b32_e32 v9, v29
-; GFX11-NEXT: v_lshl_or_b32 v11, v70, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v12, v69, 16, v17
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v18
-; GFX11-NEXT: v_lshl_or_b32 v30, v30, 16, v27
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v13, v68, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v64, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v21, v52, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v8
-; GFX11-NEXT: v_lshl_or_b32 v27, v38, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v34
-; GFX11-NEXT: v_lshl_or_b32 v33, v33, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v14, v67, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v16, v65, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v18, v55, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v20, v53, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v26, v39, 16, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v33
-; GFX11-NEXT: v_lshl_or_b32 v35, v35, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v23, v50, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v24, v49, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v25, v48, 16, v2
-; GFX11-NEXT: v_dual_mov_b32 v0, v30 :: v_dual_mov_b32 v1, v31
-; GFX11-NEXT: v_dual_mov_b32 v2, v32 :: v_dual_mov_b32 v5, v35
-; GFX11-NEXT: v_dual_mov_b32 v7, v37 :: v_dual_mov_b32 v8, v28
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_4:
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr10
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: s_branch .LBB53_2
+; GFX11-TRUE16-LABEL: bitcast_v14f64_to_v56f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-TRUE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-TRUE16-NEXT: .LBB53_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v71.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v70.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v69.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v68.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v28.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB53_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: s_branch .LBB53_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v14f64_to_v56f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, s0 :: v_dual_mov_b32 v28, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, s2 :: v_dual_mov_b32 v26, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v23, s16 :: v_dual_mov_b32 v24, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v21, s18 :: v_dual_mov_b32 v22, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v19, s20 :: v_dual_mov_b32 v20, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, s22 :: v_dual_mov_b32 v12, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s24 :: v_dual_mov_b32 v18, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v13, s26 :: v_dual_mov_b32 v14, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s28 :: v_dual_mov_b32 v16, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v27
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-FAKE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[15:16], v[15:16], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[13:14], v[13:14], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[17:18], v[17:18], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[11:12], v[11:12], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[19:20], v[19:20], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[21:22], v[21:22], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[23:24], v[23:24], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[25:26], v[25:26], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[27:28], v[27:28], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v27
+; GFX11-FAKE16-NEXT: .LBB53_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v28
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v37, v37, 16, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v71, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v29, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v36, v36, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v32, 16, v25
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v10, 16, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v66, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v51, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v34, v34, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v26
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v9, v29
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v70, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v69, 16, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v30, 16, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v68, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v64, 16, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v52, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v38, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v34
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v33, v33, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v67, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v65, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v55, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v53, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v39, 16, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v35, v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v50, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v49, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v48, 16, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v30 :: v_dual_mov_b32 v1, v31
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v32 :: v_dual_mov_b32 v5, v35
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, v37 :: v_dual_mov_b32 v8, v28
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB53_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr10
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: s_branch .LBB53_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -42545,141 +44343,299 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX11-TRUE16-LABEL: bitcast_v56f16_to_v14f64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v189, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v6 :: v_dual_mov_b32 v191, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v4 :: v_dual_mov_b32 v185, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v1 :: v_dual_mov_b32 v187, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB55_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-TRUE16-NEXT: .LBB55_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v191, 0x200, v191 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB55_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v186 :: v_dual_mov_b32 v20, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v191 :: v_dual_mov_b32 v22, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v189 :: v_dual_mov_b32 v24, v188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v28
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB55_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v28 :: v_dual_mov_b32 v53, v26
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v28, v64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB55_2
;
; GFX11-FAKE16-LABEL: bitcast_v56f16_to_v14f64_scalar:
@@ -45566,27 +47522,13 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v56i16_to_v56f16_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v27.h
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v26, v8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v24, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, 0
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
@@ -45608,30 +47550,29 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v27.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v25.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v23.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v22.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v21.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v20.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v19.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v18.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v28.h
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_4
; GFX11-TRUE16-NEXT: .LBB57_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v27, 16, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v26, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v25, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v24, 16, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v18, 16, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s42
@@ -45650,75 +47591,85 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s43
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s29, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s28, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s29, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s28, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s26, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s25, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v29, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v33, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v34, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v32, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v31, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v30, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v37, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v36, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v32
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v31
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v37
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v36
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v35
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v34
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v33
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v29
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v12
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
; GFX11-TRUE16-NEXT: s_branch .LBB57_5
; GFX11-TRUE16-NEXT: .LBB57_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
; GFX11-TRUE16-NEXT: s_branch .LBB57_2
; GFX11-TRUE16-NEXT: .LBB57_4:
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s29 :: v_dual_mov_b32 v14, s28
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s25 :: v_dual_mov_b32 v10, s24
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s22
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s21 :: v_dual_mov_b32 v29, s20
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, s19 :: v_dual_mov_b32 v34, s18
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, s17 :: v_dual_mov_b32 v36, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, s3 :: v_dual_mov_b32 v30, s2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, s1 :: v_dual_mov_b32 v32, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s45 :: v_dual_mov_b32 v39, s44
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v48, s42 :: v_dual_mov_b32 v49, s41
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v50, s40 :: v_dual_mov_b32 v51, s15
@@ -45729,69 +47680,49 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v68, s5 :: v_dual_mov_b32 v69, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v70, s4 :: v_dual_mov_b32 v71, s43
; GFX11-TRUE16-NEXT: .LBB57_5: ; %end
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v37
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v70, 16, v31
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v70, 0xffff, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v33, v68, 16, v37
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v80, 0xffff, v30
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v35, v66, 16, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v36
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v24, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v26, 16, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v31
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v30, v71, 16, v32
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v32, v69, 16, v80
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v69, 0xffff, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v37, v64, 16, v70
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v29, 0xffff, v29
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v28
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v18, 16, v0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v35 :: v_dual_and_b32 v0, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v34, v67, 16, v36
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v36, v65, 16, v69
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v55, 16, v29
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v54, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v53, 16, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v52, 16, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v51, 16, v65
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v22, 16, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v50, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v49, 16, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v48, 16, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v39, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v38, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v23, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v25, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v27, 16, v4
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v30
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v32
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v34
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v36 :: v_dual_mov_b32 v7, v37
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, v28 :: v_dual_mov_b32 v9, v29
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v71.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v70.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v69.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v68.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v28.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: bitcast_v56i16_to_v56f16_scalar:
@@ -48280,27 +50211,13 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v56f16_to_v56i16_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v27.h
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v26, v8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v24, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, 0
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
@@ -48322,30 +50239,29 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v27.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v25.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v23.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v22.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v21.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v20.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v19.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v18.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v28.h
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_4
; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v27, 16, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v26, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v25, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v24, 16, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v18, 16, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s42
@@ -48364,75 +50280,85 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s43
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s29 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s28 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s29 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s28 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s26 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s25 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v29, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v33, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v34, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v32, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v31, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v30, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v37, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v36, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v32
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v31
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v37
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v36
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v35
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v34
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v33
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v29
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v12
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
; GFX11-TRUE16-NEXT: s_branch .LBB59_5
; GFX11-TRUE16-NEXT: .LBB59_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
; GFX11-TRUE16-NEXT: s_branch .LBB59_2
; GFX11-TRUE16-NEXT: .LBB59_4:
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s29 :: v_dual_mov_b32 v14, s28
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s25 :: v_dual_mov_b32 v10, s24
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s22
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s21 :: v_dual_mov_b32 v29, s20
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, s19 :: v_dual_mov_b32 v34, s18
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, s17 :: v_dual_mov_b32 v36, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, s3 :: v_dual_mov_b32 v30, s2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, s1 :: v_dual_mov_b32 v32, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s45 :: v_dual_mov_b32 v39, s44
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v48, s42 :: v_dual_mov_b32 v49, s41
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v50, s40 :: v_dual_mov_b32 v51, s15
@@ -48443,69 +50369,49 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v68, s5 :: v_dual_mov_b32 v69, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v70, s4 :: v_dual_mov_b32 v71, s43
; GFX11-TRUE16-NEXT: .LBB59_5: ; %end
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v37
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v70, 16, v31
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v70, 0xffff, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v33, v68, 16, v37
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v80, 0xffff, v30
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v35, v66, 16, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v36
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v24, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v26, 16, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v31
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v30, v71, 16, v32
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v32, v69, 16, v80
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v69, 0xffff, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v37, v64, 16, v70
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v29, 0xffff, v29
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v28
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v18, 16, v0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v35 :: v_dual_and_b32 v0, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v34, v67, 16, v36
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v36, v65, 16, v69
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v55, 16, v29
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v54, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v53, 16, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v52, 16, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v51, 16, v65
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v22, 16, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v50, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v49, 16, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v48, 16, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v39, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v38, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v23, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v25, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v27, 16, v4
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v30
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v32
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v34
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v36 :: v_dual_mov_b32 v7, v37
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, v28 :: v_dual_mov_b32 v9, v29
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v71.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v70.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v69.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v68.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v28.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: bitcast_v56f16_to_v56i16_scalar:
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll
index 967f1a9..93c11f1 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll
@@ -7240,153 +7240,305 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; GFX11-TRUE16-LABEL: bitcast_v60i16_to_v30i32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v10.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v71, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v70, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v69, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v68, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v11
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v191, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v6 :: v_dual_mov_b32 v185, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v3 :: v_dual_mov_b32 v187, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v1 :: v_dual_mov_b32 v189, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB15_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-TRUE16-NEXT: .LBB15_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v191, v191, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v30, v30, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v29, v29, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB15_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v188 :: v_dual_mov_b32 v20, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v186 :: v_dual_mov_b32 v22, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v191 :: v_dual_mov_b32 v24, v190
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v30
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB15_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v29 :: v_dual_mov_b32 v65, v28
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v66, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v26 :: v_dual_mov_b32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v65 :: v_dual_mov_b32 v29, v64
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v30, v66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB15_2
;
; GFX11-FAKE16-LABEL: bitcast_v60i16_to_v30i32_scalar:
@@ -12840,153 +12992,305 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v60f16_to_v30i32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v10.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v71, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v70, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v69, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v68, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v11
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v191, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v6 :: v_dual_mov_b32 v185, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v3 :: v_dual_mov_b32 v187, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v1 :: v_dual_mov_b32 v189, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB19_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-TRUE16-NEXT: .LBB19_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v191, 0x200, v191 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v30, 0x200, v30 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v29, 0x200, v29 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB19_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v188 :: v_dual_mov_b32 v20, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v186 :: v_dual_mov_b32 v22, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v191 :: v_dual_mov_b32 v24, v190
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v30
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB19_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v29 :: v_dual_mov_b32 v65, v28
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v66, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v26 :: v_dual_mov_b32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v65 :: v_dual_mov_b32 v29, v64
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v30, v66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB19_2
;
; GFX11-FAKE16-LABEL: bitcast_v60f16_to_v30i32_scalar:
@@ -16802,204 +17106,388 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr54
; GFX9-NEXT: s_branch .LBB29_2
;
-; GFX11-LABEL: bitcast_v30f32_to_v60i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-NEXT: v_dual_mov_b32 v30, s0 :: v_dual_mov_b32 v29, s1
-; GFX11-NEXT: v_dual_mov_b32 v28, s2 :: v_dual_mov_b32 v27, s3
-; GFX11-NEXT: v_dual_mov_b32 v26, s16 :: v_dual_mov_b32 v25, s17
-; GFX11-NEXT: v_dual_mov_b32 v24, s18 :: v_dual_mov_b32 v23, s19
-; GFX11-NEXT: v_dual_mov_b32 v22, s20 :: v_dual_mov_b32 v21, s21
-; GFX11-NEXT: v_dual_mov_b32 v20, s22 :: v_dual_mov_b32 v19, s23
-; GFX11-NEXT: v_dual_mov_b32 v18, s24 :: v_dual_mov_b32 v13, s26
-; GFX11-NEXT: v_dual_mov_b32 v14, s25 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v30
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
-; GFX11-NEXT: .LBB29_2: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
-; GFX11-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v26, 1.0, v26
-; GFX11-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v28, 1.0, v28
-; GFX11-NEXT: v_dual_add_f32 v29, 1.0, v29 :: v_dual_add_f32 v30, 1.0, v30
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v30
-; GFX11-NEXT: .LBB29_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v29, 0xffff, v29
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_lshl_or_b32 v33, v33, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v35, v35, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v12, v12, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v19, v68, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v37, v37, 16, v29
-; GFX11-NEXT: v_and_b32_e32 v30, 0xffff, v30
-; GFX11-NEXT: v_lshl_or_b32 v39, v39, 16, v27
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v28
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_lshl_or_b32 v49, v49, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v26, 0xffff, v26
-; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v13, v82, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v14, v81, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v16, v71, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v70, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v69, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v21, v66, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v8
-; GFX11-NEXT: v_lshl_or_b32 v38, v38, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v22
-; GFX11-NEXT: v_lshl_or_b32 v34, v34, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_lshl_or_b32 v20, v67, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v22, v65, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v23, v64, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v10
-; GFX11-NEXT: v_mov_b32_e32 v5, v49
-; GFX11-NEXT: v_lshl_or_b32 v48, v48, 16, v26
-; GFX11-NEXT: v_lshl_or_b32 v26, v53, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v37
-; GFX11-NEXT: v_lshl_or_b32 v36, v36, 16, v30
-; GFX11-NEXT: v_mov_b32_e32 v7, v31
-; GFX11-NEXT: v_lshl_or_b32 v30, v83, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v24, v55, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v11
-; GFX11-NEXT: v_lshl_or_b32 v15, v80, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v25, v54, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v27, v52, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v28, v51, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v29, v50, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v36
-; GFX11-NEXT: v_dual_mov_b32 v2, v38 :: v_dual_mov_b32 v3, v39
-; GFX11-NEXT: v_mov_b32_e32 v4, v48
-; GFX11-NEXT: v_mov_b32_e32 v6, v30
-; GFX11-NEXT: v_dual_mov_b32 v8, v32 :: v_dual_mov_b32 v9, v33
-; GFX11-NEXT: v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB29_4:
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr83
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr12
-; GFX11-NEXT: ; implicit-def: $vgpr82
-; GFX11-NEXT: ; implicit-def: $vgpr81
-; GFX11-NEXT: ; implicit-def: $vgpr80
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: s_branch .LBB29_2
+; GFX11-TRUE16-LABEL: bitcast_v30f32_to_v60i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v29, v11
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-TRUE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v29, 1.0, v29 :: v_dual_add_f32 v28, 1.0, v28
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-TRUE16-NEXT: .LBB29_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v83, v83 :: v_dual_mov_b32 v82, v82
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v81, v81 :: v_dual_mov_b32 v80, v80
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v83.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v82.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v81.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v80.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v71.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v70.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v69.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v68.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v30.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB29_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: s_branch .LBB29_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v30f32_to_v60i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s0 :: v_dual_mov_b32 v29, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s2 :: v_dual_mov_b32 v27, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s16 :: v_dual_mov_b32 v25, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s18 :: v_dual_mov_b32 v23, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s20 :: v_dual_mov_b32 v21, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s22 :: v_dual_mov_b32 v19, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s24 :: v_dual_mov_b32 v13, s26
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s25 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v30
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-FAKE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v28, 1.0, v28
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v29, 1.0, v29 :: v_dual_add_f32 v30, 1.0, v30
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v30
+; GFX11-FAKE16-NEXT: .LBB29_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v29, 0xffff, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v33, v33, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v35, v35, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v12, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v68, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v37, v37, 16, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v30, 0xffff, v30
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v39, v39, 16, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v28
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v49, v49, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xffff, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v31, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v82, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v81, 16, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v71, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v70, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v69, 16, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v66, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v38, v38, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v32, 16, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v34, v34, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v67, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v65, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v64, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v49
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v48, v48, 16, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v53, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v37
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v36, v36, 16, v30
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v7, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v83, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v55, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v80, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v54, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v52, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v51, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v50, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v36
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v38 :: v_dual_mov_b32 v3, v39
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v48
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v30
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, v32 :: v_dual_mov_b32 v9, v33
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB29_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr83
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr12
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr82
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr81
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr80
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: s_branch .LBB29_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -19290,153 +19778,305 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v60i16_to_v30f32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v10.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v71, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v70, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v69, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v68, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v11
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v191, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v6 :: v_dual_mov_b32 v185, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v3 :: v_dual_mov_b32 v187, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v1 :: v_dual_mov_b32 v189, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB31_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB31_3
; GFX11-TRUE16-NEXT: .LBB31_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v191, v191, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v30, v30, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v29, v29, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB31_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v188 :: v_dual_mov_b32 v20, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v186 :: v_dual_mov_b32 v22, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v191 :: v_dual_mov_b32 v24, v190
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v30
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB31_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v29 :: v_dual_mov_b32 v65, v28
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v66, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v26 :: v_dual_mov_b32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v65 :: v_dual_mov_b32 v29, v64
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v30, v66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB31_2
;
; GFX11-FAKE16-LABEL: bitcast_v60i16_to_v30f32_scalar:
@@ -21985,204 +22625,388 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr54
; GFX9-NEXT: s_branch .LBB33_2
;
-; GFX11-LABEL: bitcast_v30f32_to_v60f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-NEXT: v_dual_mov_b32 v30, s0 :: v_dual_mov_b32 v29, s1
-; GFX11-NEXT: v_dual_mov_b32 v28, s2 :: v_dual_mov_b32 v27, s3
-; GFX11-NEXT: v_dual_mov_b32 v26, s16 :: v_dual_mov_b32 v25, s17
-; GFX11-NEXT: v_dual_mov_b32 v24, s18 :: v_dual_mov_b32 v23, s19
-; GFX11-NEXT: v_dual_mov_b32 v22, s20 :: v_dual_mov_b32 v21, s21
-; GFX11-NEXT: v_dual_mov_b32 v20, s22 :: v_dual_mov_b32 v19, s23
-; GFX11-NEXT: v_dual_mov_b32 v18, s24 :: v_dual_mov_b32 v13, s26
-; GFX11-NEXT: v_dual_mov_b32 v14, s25 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v30
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
-; GFX11-NEXT: .LBB33_2: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
-; GFX11-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v26, 1.0, v26
-; GFX11-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v28, 1.0, v28
-; GFX11-NEXT: v_dual_add_f32 v29, 1.0, v29 :: v_dual_add_f32 v30, 1.0, v30
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v30
-; GFX11-NEXT: .LBB33_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v29, 0xffff, v29
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_lshl_or_b32 v33, v33, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v35, v35, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v12, v12, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v19, v68, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v37, v37, 16, v29
-; GFX11-NEXT: v_and_b32_e32 v30, 0xffff, v30
-; GFX11-NEXT: v_lshl_or_b32 v39, v39, 16, v27
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v28
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_lshl_or_b32 v49, v49, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v26, 0xffff, v26
-; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v13, v82, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v14, v81, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v16, v71, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v70, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v69, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v21, v66, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v8
-; GFX11-NEXT: v_lshl_or_b32 v38, v38, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v22
-; GFX11-NEXT: v_lshl_or_b32 v34, v34, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_lshl_or_b32 v20, v67, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v22, v65, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v23, v64, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v10
-; GFX11-NEXT: v_mov_b32_e32 v5, v49
-; GFX11-NEXT: v_lshl_or_b32 v48, v48, 16, v26
-; GFX11-NEXT: v_lshl_or_b32 v26, v53, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v37
-; GFX11-NEXT: v_lshl_or_b32 v36, v36, 16, v30
-; GFX11-NEXT: v_mov_b32_e32 v7, v31
-; GFX11-NEXT: v_lshl_or_b32 v30, v83, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v24, v55, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v11
-; GFX11-NEXT: v_lshl_or_b32 v15, v80, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v25, v54, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v27, v52, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v28, v51, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v29, v50, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v36
-; GFX11-NEXT: v_dual_mov_b32 v2, v38 :: v_dual_mov_b32 v3, v39
-; GFX11-NEXT: v_mov_b32_e32 v4, v48
-; GFX11-NEXT: v_mov_b32_e32 v6, v30
-; GFX11-NEXT: v_dual_mov_b32 v8, v32 :: v_dual_mov_b32 v9, v33
-; GFX11-NEXT: v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB33_4:
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr83
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr12
-; GFX11-NEXT: ; implicit-def: $vgpr82
-; GFX11-NEXT: ; implicit-def: $vgpr81
-; GFX11-NEXT: ; implicit-def: $vgpr80
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: s_branch .LBB33_2
+; GFX11-TRUE16-LABEL: bitcast_v30f32_to_v60f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v29, v11
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-TRUE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v29, 1.0, v29 :: v_dual_add_f32 v28, 1.0, v28
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-TRUE16-NEXT: .LBB33_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v83, v83 :: v_dual_mov_b32 v82, v82
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v81, v81 :: v_dual_mov_b32 v80, v80
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v83.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v82.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v81.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v80.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v71.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v70.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v69.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v68.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v30.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB33_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: s_branch .LBB33_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v30f32_to_v60f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s0 :: v_dual_mov_b32 v29, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s2 :: v_dual_mov_b32 v27, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s16 :: v_dual_mov_b32 v25, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s18 :: v_dual_mov_b32 v23, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s20 :: v_dual_mov_b32 v21, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s22 :: v_dual_mov_b32 v19, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s24 :: v_dual_mov_b32 v13, s26
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s25 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v30
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-FAKE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v28, 1.0, v28
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v29, 1.0, v29 :: v_dual_add_f32 v30, 1.0, v30
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v30
+; GFX11-FAKE16-NEXT: .LBB33_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v29, 0xffff, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v33, v33, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v35, v35, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v12, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v68, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v37, v37, 16, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v30, 0xffff, v30
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v39, v39, 16, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v28
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v49, v49, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xffff, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v31, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v82, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v81, 16, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v71, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v70, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v69, 16, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v66, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v38, v38, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v32, 16, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v34, v34, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v67, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v65, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v64, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v49
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v48, v48, 16, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v53, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v37
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v36, v36, 16, v30
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v7, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v83, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v55, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v80, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v54, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v52, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v51, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v50, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v36
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v38 :: v_dual_mov_b32 v3, v39
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v48
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v30
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, v32 :: v_dual_mov_b32 v9, v33
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB33_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr83
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr12
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr82
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr81
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr80
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: s_branch .LBB33_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -24867,153 +25691,305 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v60f16_to_v30f32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v10.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v71, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v70, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v69, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v68, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v11
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v191, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v6 :: v_dual_mov_b32 v185, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v3 :: v_dual_mov_b32 v187, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v1 :: v_dual_mov_b32 v189, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB35_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-TRUE16-NEXT: .LBB35_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v191, 0x200, v191 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v30, 0x200, v30 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v29, 0x200, v29 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB35_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v188 :: v_dual_mov_b32 v20, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v186 :: v_dual_mov_b32 v22, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v191 :: v_dual_mov_b32 v24, v190
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v30
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB35_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v29 :: v_dual_mov_b32 v65, v28
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v66, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v26 :: v_dual_mov_b32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v65 :: v_dual_mov_b32 v29, v64
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v30, v66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB35_2
;
; GFX11-FAKE16-LABEL: bitcast_v60f16_to_v30f32_scalar:
@@ -30472,153 +31448,305 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; GFX11-TRUE16-LABEL: bitcast_v60i16_to_v15i64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v10.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v71, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v70, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v69, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v68, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v11
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v191, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v6 :: v_dual_mov_b32 v185, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v3 :: v_dual_mov_b32 v187, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v1 :: v_dual_mov_b32 v189, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB43_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-TRUE16-NEXT: .LBB43_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v191, v191, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v30, v30, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v29, v29, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB43_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v188 :: v_dual_mov_b32 v20, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v186 :: v_dual_mov_b32 v22, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v191 :: v_dual_mov_b32 v24, v190
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v30
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB43_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v29 :: v_dual_mov_b32 v65, v28
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v66, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v26 :: v_dual_mov_b32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v65 :: v_dual_mov_b32 v29, v64
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v30, v66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB43_2
;
; GFX11-FAKE16-LABEL: bitcast_v60i16_to_v15i64_scalar:
@@ -36089,153 +37217,305 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v60f16_to_v15i64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v10.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v71, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v70, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v69, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v68, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v11
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v191, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v6 :: v_dual_mov_b32 v185, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v3 :: v_dual_mov_b32 v187, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v1 :: v_dual_mov_b32 v189, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v191, 0x200, v191 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v30, 0x200, v30 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v29, 0x200, v29 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB47_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v188 :: v_dual_mov_b32 v20, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v186 :: v_dual_mov_b32 v22, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v191 :: v_dual_mov_b32 v24, v190
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v30
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB47_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v29 :: v_dual_mov_b32 v65, v28
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v66, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v26 :: v_dual_mov_b32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v65 :: v_dual_mov_b32 v29, v64
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v30, v66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB47_2
;
; GFX11-FAKE16-LABEL: bitcast_v60f16_to_v15i64_scalar:
@@ -38144,204 +39424,388 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr54
; GFX9-NEXT: s_branch .LBB49_2
;
-; GFX11-LABEL: bitcast_v15f64_to_v60i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-NEXT: v_dual_mov_b32 v30, s0 :: v_dual_mov_b32 v31, s1
-; GFX11-NEXT: v_dual_mov_b32 v28, s2 :: v_dual_mov_b32 v29, s3
-; GFX11-NEXT: v_dual_mov_b32 v26, s16 :: v_dual_mov_b32 v27, s17
-; GFX11-NEXT: v_dual_mov_b32 v24, s18 :: v_dual_mov_b32 v25, s19
-; GFX11-NEXT: v_dual_mov_b32 v22, s20 :: v_dual_mov_b32 v23, s21
-; GFX11-NEXT: v_dual_mov_b32 v20, s22 :: v_dual_mov_b32 v21, s23
-; GFX11-NEXT: v_dual_mov_b32 v18, s24 :: v_dual_mov_b32 v19, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v30
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
-; GFX11-NEXT: .LBB49_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
-; GFX11-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
-; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
-; GFX11-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
-; GFX11-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
-; GFX11-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
-; GFX11-NEXT: v_add_f64 v[30:31], v[30:31], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v30
-; GFX11-NEXT: .LBB49_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_and_b32_e32 v26, 0xffff, v26
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_and_b32_e32 v29, 0xffff, v29
-; GFX11-NEXT: v_lshl_or_b32 v34, v34, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_lshl_or_b32 v48, v48, 16, v26
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_lshl_or_b32 v13, v13, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v19, v68, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v37, v37, 16, v31
-; GFX11-NEXT: v_and_b32_e32 v30, 0xffff, v30
-; GFX11-NEXT: v_lshl_or_b32 v39, v39, 16, v29
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v28
-; GFX11-NEXT: v_lshl_or_b32 v31, v82, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v26, 0xffff, v27
-; GFX11-NEXT: v_lshl_or_b32 v33, v33, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v35, v35, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_lshl_or_b32 v12, v12, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v69, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v21, v66, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v8
-; GFX11-NEXT: v_lshl_or_b32 v38, v38, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v22
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_lshl_or_b32 v20, v67, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v22, v65, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v23, v64, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v10
-; GFX11-NEXT: v_mov_b32_e32 v7, v31
-; GFX11-NEXT: v_lshl_or_b32 v49, v49, 16, v26
-; GFX11-NEXT: v_lshl_or_b32 v26, v53, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v37
-; GFX11-NEXT: v_lshl_or_b32 v36, v36, 16, v30
-; GFX11-NEXT: v_mov_b32_e32 v9, v33
-; GFX11-NEXT: v_lshl_or_b32 v30, v83, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v24, v55, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v11
-; GFX11-NEXT: v_lshl_or_b32 v14, v81, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v15, v80, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v16, v71, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v17, v70, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v25, v54, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v27, v52, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v28, v51, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v29, v50, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v36
-; GFX11-NEXT: v_dual_mov_b32 v2, v38 :: v_dual_mov_b32 v3, v39
-; GFX11-NEXT: v_dual_mov_b32 v4, v48 :: v_dual_mov_b32 v5, v49
-; GFX11-NEXT: v_mov_b32_e32 v6, v30
-; GFX11-NEXT: v_mov_b32_e32 v8, v32
-; GFX11-NEXT: v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB49_4:
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr83
-; GFX11-NEXT: ; implicit-def: $vgpr82
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr12
-; GFX11-NEXT: ; implicit-def: $vgpr13
-; GFX11-NEXT: ; implicit-def: $vgpr81
-; GFX11-NEXT: ; implicit-def: $vgpr80
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: s_branch .LBB49_2
+; GFX11-TRUE16-LABEL: bitcast_v15f64_to_v60i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v29, v11
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-TRUE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-TRUE16-NEXT: .LBB49_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v83, v83 :: v_dual_mov_b32 v82, v82
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v81, v81 :: v_dual_mov_b32 v80, v80
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v83.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v82.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v81.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v80.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v71.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v70.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v69.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v68.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v30.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB49_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: s_branch .LBB49_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v15f64_to_v60i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s0 :: v_dual_mov_b32 v31, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s2 :: v_dual_mov_b32 v29, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s16 :: v_dual_mov_b32 v27, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s18 :: v_dual_mov_b32 v25, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s20 :: v_dual_mov_b32 v23, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s22 :: v_dual_mov_b32 v21, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s24 :: v_dual_mov_b32 v19, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v30
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-FAKE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[30:31], v[30:31], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v30
+; GFX11-FAKE16-NEXT: .LBB49_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xffff, v26
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v29, 0xffff, v29
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v34, v34, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v48, v48, 16, v26
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v13, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v68, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v37, v37, 16, v31
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v30, 0xffff, v30
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v39, v39, 16, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v82, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v33, v33, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v35, v35, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v12, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v69, 16, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v66, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v38, v38, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v32, 16, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v67, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v65, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v64, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v7, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v49, v49, 16, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v53, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v37
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v36, v36, 16, v30
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v9, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v83, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v55, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v81, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v80, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v71, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v70, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v54, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v52, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v51, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v50, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v36
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v38 :: v_dual_mov_b32 v3, v39
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, v48 :: v_dual_mov_b32 v5, v49
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v30
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v8, v32
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB49_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr83
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr82
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr12
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr13
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr81
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr80
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: s_branch .LBB49_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -40632,153 +42096,305 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v60i16_to_v15f64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v10.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v71, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v70, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v69, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v68, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v11
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v191, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v6 :: v_dual_mov_b32 v185, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v3 :: v_dual_mov_b32 v187, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v1 :: v_dual_mov_b32 v189, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB51_3
; GFX11-TRUE16-NEXT: .LBB51_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v191, v191, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v30, v30, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v29, v29, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB51_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v188 :: v_dual_mov_b32 v20, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v186 :: v_dual_mov_b32 v22, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v191 :: v_dual_mov_b32 v24, v190
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v30
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB51_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v29 :: v_dual_mov_b32 v65, v28
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v66, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v26 :: v_dual_mov_b32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v65 :: v_dual_mov_b32 v29, v64
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v30, v66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB51_2
;
; GFX11-FAKE16-LABEL: bitcast_v60i16_to_v15f64_scalar:
@@ -43227,204 +44843,388 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; GFX9-NEXT: ; implicit-def: $vgpr54
; GFX9-NEXT: s_branch .LBB53_2
;
-; GFX11-LABEL: bitcast_v15f64_to_v60f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-NEXT: v_dual_mov_b32 v30, s0 :: v_dual_mov_b32 v31, s1
-; GFX11-NEXT: v_dual_mov_b32 v28, s2 :: v_dual_mov_b32 v29, s3
-; GFX11-NEXT: v_dual_mov_b32 v26, s16 :: v_dual_mov_b32 v27, s17
-; GFX11-NEXT: v_dual_mov_b32 v24, s18 :: v_dual_mov_b32 v25, s19
-; GFX11-NEXT: v_dual_mov_b32 v22, s20 :: v_dual_mov_b32 v23, s21
-; GFX11-NEXT: v_dual_mov_b32 v20, s22 :: v_dual_mov_b32 v21, s23
-; GFX11-NEXT: v_dual_mov_b32 v18, s24 :: v_dual_mov_b32 v19, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v30
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
-; GFX11-NEXT: .LBB53_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
-; GFX11-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
-; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
-; GFX11-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
-; GFX11-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
-; GFX11-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
-; GFX11-NEXT: v_add_f64 v[30:31], v[30:31], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v30
-; GFX11-NEXT: .LBB53_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_and_b32_e32 v26, 0xffff, v26
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_and_b32_e32 v29, 0xffff, v29
-; GFX11-NEXT: v_lshl_or_b32 v34, v34, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_lshl_or_b32 v48, v48, 16, v26
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_lshl_or_b32 v13, v13, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v19, v68, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v37, v37, 16, v31
-; GFX11-NEXT: v_and_b32_e32 v30, 0xffff, v30
-; GFX11-NEXT: v_lshl_or_b32 v39, v39, 16, v29
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v28
-; GFX11-NEXT: v_lshl_or_b32 v31, v82, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v26, 0xffff, v27
-; GFX11-NEXT: v_lshl_or_b32 v33, v33, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v35, v35, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_lshl_or_b32 v12, v12, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v69, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v21, v66, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v8
-; GFX11-NEXT: v_lshl_or_b32 v38, v38, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v22
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_lshl_or_b32 v20, v67, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v22, v65, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v23, v64, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v10
-; GFX11-NEXT: v_mov_b32_e32 v7, v31
-; GFX11-NEXT: v_lshl_or_b32 v49, v49, 16, v26
-; GFX11-NEXT: v_lshl_or_b32 v26, v53, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v37
-; GFX11-NEXT: v_lshl_or_b32 v36, v36, 16, v30
-; GFX11-NEXT: v_mov_b32_e32 v9, v33
-; GFX11-NEXT: v_lshl_or_b32 v30, v83, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v24, v55, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v11
-; GFX11-NEXT: v_lshl_or_b32 v14, v81, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v15, v80, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v16, v71, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v17, v70, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v25, v54, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v27, v52, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v28, v51, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v29, v50, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v36
-; GFX11-NEXT: v_dual_mov_b32 v2, v38 :: v_dual_mov_b32 v3, v39
-; GFX11-NEXT: v_dual_mov_b32 v4, v48 :: v_dual_mov_b32 v5, v49
-; GFX11-NEXT: v_mov_b32_e32 v6, v30
-; GFX11-NEXT: v_mov_b32_e32 v8, v32
-; GFX11-NEXT: v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_4:
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr83
-; GFX11-NEXT: ; implicit-def: $vgpr82
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr12
-; GFX11-NEXT: ; implicit-def: $vgpr13
-; GFX11-NEXT: ; implicit-def: $vgpr81
-; GFX11-NEXT: ; implicit-def: $vgpr80
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: s_branch .LBB53_2
+; GFX11-TRUE16-LABEL: bitcast_v15f64_to_v60f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v29, v11
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-TRUE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-TRUE16-NEXT: .LBB53_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v83, v83 :: v_dual_mov_b32 v82, v82
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v81, v81 :: v_dual_mov_b32 v80, v80
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v83.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v82.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v81.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v80.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v71.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v70.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v69.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v68.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v30.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB53_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: s_branch .LBB53_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v15f64_to_v60f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s0 :: v_dual_mov_b32 v31, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s2 :: v_dual_mov_b32 v29, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s16 :: v_dual_mov_b32 v27, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s18 :: v_dual_mov_b32 v25, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s20 :: v_dual_mov_b32 v23, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s22 :: v_dual_mov_b32 v21, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s24 :: v_dual_mov_b32 v19, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v30
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-FAKE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[30:31], v[30:31], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v30
+; GFX11-FAKE16-NEXT: .LBB53_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xffff, v26
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v29, 0xffff, v29
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v34, v34, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v48, v48, 16, v26
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v13, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v68, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v37, v37, 16, v31
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v30, 0xffff, v30
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v39, v39, 16, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v82, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v33, v33, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v35, v35, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v12, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v69, 16, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v66, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v38, v38, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v32, 16, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v67, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v65, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v64, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v7, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v49, v49, 16, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v53, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v37
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v36, v36, 16, v30
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v9, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v83, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v55, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v81, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v80, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v71, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v70, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v54, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v52, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v51, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v50, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v36
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v38 :: v_dual_mov_b32 v3, v39
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, v48 :: v_dual_mov_b32 v5, v49
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v30
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v8, v32
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB53_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr83
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr82
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr12
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr13
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr81
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr80
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: s_branch .LBB53_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -46109,153 +47909,305 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; GFX11-TRUE16-LABEL: bitcast_v60f16_to_v15f64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v10.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v71, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v70, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v69, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v68, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v11
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v191, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v6 :: v_dual_mov_b32 v185, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v3 :: v_dual_mov_b32 v187, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v1 :: v_dual_mov_b32 v189, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB55_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-TRUE16-NEXT: .LBB55_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v191, 0x200, v191 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v30, 0x200, v30 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v29, 0x200, v29 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB55_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v188 :: v_dual_mov_b32 v20, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v186 :: v_dual_mov_b32 v22, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v191 :: v_dual_mov_b32 v24, v190
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v30
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB55_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v29 :: v_dual_mov_b32 v65, v28
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v66, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v26 :: v_dual_mov_b32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v65 :: v_dual_mov_b32 v29, v64
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v30, v66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB55_2
;
; GFX11-FAKE16-LABEL: bitcast_v60f16_to_v15f64_scalar:
@@ -49421,31 +51373,14 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v60i16_to_v60f16_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v10.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v29.h
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v26, v8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v24, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, 0
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
@@ -49467,34 +51402,33 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v29.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v27.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v25.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v23.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v22.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v21.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v20.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v19.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v18.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v30.h
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_4
; GFX11-TRUE16-NEXT: .LBB57_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v29, 16, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v28, 16, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v27, 16, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v26, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v25, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v24, 16, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v18, 16, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s43
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s42
@@ -49513,79 +51447,91 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s29, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v29, v29, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s29, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s28, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s27, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s26, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v34, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v30, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v31, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v32, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v33, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v49, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v48, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v39, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v38, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v37, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v36, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v48
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v39
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v38
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v37
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v36
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v49
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v33
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v32
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v31
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v35
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v34
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v14
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
; GFX11-TRUE16-NEXT: s_branch .LBB57_5
; GFX11-TRUE16-NEXT: .LBB57_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
; GFX11-TRUE16-NEXT: s_branch .LBB57_2
; GFX11-TRUE16-NEXT: .LBB57_4:
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s29 :: v_dual_mov_b32 v16, s28
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s27 :: v_dual_mov_b32 v12, s26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s24
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s23 :: v_dual_mov_b32 v35, s22
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s21 :: v_dual_mov_b32 v31, s20
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s19 :: v_dual_mov_b32 v33, s18
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, s17 :: v_dual_mov_b32 v36, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, s3 :: v_dual_mov_b32 v38, s2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, s1 :: v_dual_mov_b32 v48, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v50, s44 :: v_dual_mov_b32 v51, s43
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v52, s42 :: v_dual_mov_b32 v53, s41
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s40 :: v_dual_mov_b32 v55, s15
@@ -49596,75 +51542,52 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v80, s5 :: v_dual_mov_b32 v81, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v82, s4 :: v_dual_mov_b32 v83, s45
; GFX11-TRUE16-NEXT: .LBB57_5: ; %end
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v49
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v39
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v49, v70, 16, v49
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v84, 0xffff, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v37, v82, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v18, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v23, 16, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v5, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v39, v80, 16, v84
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v38
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v21, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v22, 16, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v28, 16, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v38, v81, 16, v38
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v85, 0xffff, v36
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v80, 0xffff, v30
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v26, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v27, 16, v2
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v36, v83, 16, v48
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v48, v71, 16, v85
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v71, 0xffff, v31
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v30, v69, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v68, 16, v32
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v33, v66, 16, v80
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v32, v67, 16, v71
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v34
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v34, v65, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v35, v64, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v55, 16, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v53, 16, v67
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v20, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v24, 16, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v54, 16, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v52, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v51, 16, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v50, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v25, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v29, 16, v4
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v36
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v48
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v30 :: v_dual_mov_b32 v7, v31
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, v32 :: v_dual_mov_b32 v9, v33
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v83, v83 :: v_dual_mov_b32 v82, v82
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v81, v81 :: v_dual_mov_b32 v80, v80
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v83.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v82.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v81.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v80.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v71.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v70.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v69.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v68.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v30.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: bitcast_v60i16_to_v60f16_scalar:
@@ -52368,31 +54291,14 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v60f16_to_v60i16_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v10.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v29.h
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v26, v8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v24, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, 0
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
@@ -52414,34 +54320,33 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v29.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v27.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v25.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v23.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v22.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v21.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v20.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v19.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v18.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v30.h
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_4
; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v29, 16, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v28, 16, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v27, 16, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v26, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v25, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v24, 16, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v18, 16, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s43
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s42
@@ -52460,79 +54365,91 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s29 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v29, 0x200, v29 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s29 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s28 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s27 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s26 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v34, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v30, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v31, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v32, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v33, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v49, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v48, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v39, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v38, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v37, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v36, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v48
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v39
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v38
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v37
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v36
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v49
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v33
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v32
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v31
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v35
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v34
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v14
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
; GFX11-TRUE16-NEXT: s_branch .LBB59_5
; GFX11-TRUE16-NEXT: .LBB59_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
; GFX11-TRUE16-NEXT: s_branch .LBB59_2
; GFX11-TRUE16-NEXT: .LBB59_4:
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s29 :: v_dual_mov_b32 v16, s28
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s27 :: v_dual_mov_b32 v12, s26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s24
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s23 :: v_dual_mov_b32 v35, s22
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s21 :: v_dual_mov_b32 v31, s20
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s19 :: v_dual_mov_b32 v33, s18
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, s17 :: v_dual_mov_b32 v36, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, s3 :: v_dual_mov_b32 v38, s2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, s1 :: v_dual_mov_b32 v48, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v50, s44 :: v_dual_mov_b32 v51, s43
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v52, s42 :: v_dual_mov_b32 v53, s41
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s40 :: v_dual_mov_b32 v55, s15
@@ -52543,75 +54460,52 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v80, s5 :: v_dual_mov_b32 v81, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v82, s4 :: v_dual_mov_b32 v83, s45
; GFX11-TRUE16-NEXT: .LBB59_5: ; %end
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v49
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v39
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v49, v70, 16, v49
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v84, 0xffff, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v37, v82, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v18, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v23, 16, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v5, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v39, v80, 16, v84
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v38
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v21, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v22, 16, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v28, 16, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v38, v81, 16, v38
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v85, 0xffff, v36
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v80, 0xffff, v30
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v26, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v27, 16, v2
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v36, v83, 16, v48
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v48, v71, 16, v85
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v71, 0xffff, v31
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v30, v69, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v68, 16, v32
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v33, v66, 16, v80
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v32, v67, 16, v71
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v34
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v34, v65, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v35, v64, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v55, 16, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v53, 16, v67
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v20, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v24, 16, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v54, 16, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v52, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v51, 16, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v50, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v25, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v29, 16, v4
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v36
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v48
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v30 :: v_dual_mov_b32 v7, v31
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, v32 :: v_dual_mov_b32 v9, v33
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v83, v83 :: v_dual_mov_b32 v82, v82
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v81, v81 :: v_dual_mov_b32 v80, v80
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v83.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v82.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v81.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v80.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v71.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v70.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v69.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v68.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v30.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: bitcast_v60f16_to_v60i16_scalar:
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll
index 9a6ea1b..6ada0cb 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll
@@ -2402,89 +2402,171 @@ define inreg <3 x i32> @bitcast_v6bf16_to_v3i32_scalar(<6 x bfloat> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v6bf16_to_v3i32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s3, 0
-; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
-; GFX11-NEXT: .LBB11_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s2
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s2
-; GFX11-NEXT: s_pack_lh_b32_b16 s2, 0, s0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s1
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s2
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v10, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v9, v6, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v9, v6
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v11 :: v_dual_and_b32 v1, 0xffff, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v9
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v8, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v10, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v2, v0, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v5, v9, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v1, v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_lshl_or_b32 v0, v4, 16, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
-; GFX11-NEXT: .LBB11_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_mov_b32_e32 v2, s2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v6bf16_to_v3i32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s3, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-TRUE16-NEXT: .LBB11_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v9, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v7, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v1, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v3.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v6.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB11_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB11_2
+; GFX11-TRUE16-NEXT: .LBB11_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s2
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v6bf16_to_v3i32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s3, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-FAKE16-NEXT: .LBB11_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s2
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v10, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v9, v6
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v2, v11 :: v_dual_and_b32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v8, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v0, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v4, 16, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB11_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB11_2
+; GFX11-FAKE16-NEXT: .LBB11_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, s2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5536,89 +5618,171 @@ define inreg <3 x float> @bitcast_v6bf16_to_v3f32_scalar(<6 x bfloat> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v6bf16_to_v3f32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s3, 0
-; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-NEXT: s_cbranch_vccnz .LBB27_4
-; GFX11-NEXT: .LBB27_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s2
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s2
-; GFX11-NEXT: s_pack_lh_b32_b16 s2, 0, s0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s1
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s2
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v10, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v9, v6, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v9, v6
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v11 :: v_dual_and_b32 v1, 0xffff, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v9
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v8, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v10, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v2, v0, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v5, v9, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v1, v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_lshl_or_b32 v0, v4, 16, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB27_3:
-; GFX11-NEXT: s_branch .LBB27_2
-; GFX11-NEXT: .LBB27_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_mov_b32_e32 v2, s2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v6bf16_to_v3f32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s3, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX11-TRUE16-NEXT: .LBB27_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v9, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v7, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v1, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v3.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v6.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB27_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB27_2
+; GFX11-TRUE16-NEXT: .LBB27_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s2
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v6bf16_to_v3f32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s3, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX11-FAKE16-NEXT: .LBB27_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s2
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v10, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v9, v6
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v2, v11 :: v_dual_and_b32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v8, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v0, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v4, 16, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB27_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB27_2
+; GFX11-FAKE16-NEXT: .LBB27_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, s2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8229,124 +8393,243 @@ define inreg <12 x i8> @bitcast_v6bf16_to_v12i8_scalar(<6 x bfloat> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v4, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v6bf16_to_v12i8_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s3, 0
-; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s13, s2, 16
-; GFX11-NEXT: s_lshr_b32 s12, s2, 8
-; GFX11-NEXT: s_lshr_b32 s8, s1, 24
-; GFX11-NEXT: s_lshr_b32 s14, s1, 16
-; GFX11-NEXT: s_lshr_b32 s9, s1, 8
-; GFX11-NEXT: s_lshr_b32 s11, s0, 16
-; GFX11-NEXT: s_lshr_b32 s10, s0, 8
-; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
-; GFX11-NEXT: .LBB39_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s1
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s1, 0, s2
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s2
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v8
-; GFX11-NEXT: v_mov_b32_e32 v12, 0x7fc07fc0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v9, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v0, v8, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v2, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v0, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v10, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v10, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v8, v0, v11, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v3
-; GFX11-NEXT: v_bfe_u32 v3, v7, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v9 :: v_dual_add_nc_u32 v3, v3, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_lshl_or_b32 v2, v6, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v1, v4, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_lshl_or_b32 v11, v7, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 24, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b64 v[3:4], 24, v[1:2]
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 8, v11
-; GFX11-NEXT: v_lshrrev_b64 v[11:12], 24, v[11:12]
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX11-NEXT: v_mov_b32_e32 v4, v13
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: s_branch .LBB39_2
-; GFX11-NEXT: .LBB39_4:
-; GFX11-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v9, s12
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s10
-; GFX11-NEXT: v_dual_mov_b32 v6, s14 :: v_dual_mov_b32 v7, s8
-; GFX11-NEXT: v_dual_mov_b32 v10, s13 :: v_dual_mov_b32 v5, s9
-; GFX11-NEXT: v_dual_mov_b32 v2, s11 :: v_dual_mov_b32 v11, s6
-; GFX11-NEXT: v_dual_mov_b32 v3, s4 :: v_dual_mov_b32 v4, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v6bf16_to_v12i8_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s3, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s2, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s1, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s1, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s0, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s0, 8
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-TRUE16-NEXT: .LBB39_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s1, 0, s0
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v12, 0x7fc07fc0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v8, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v13, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v8, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v13.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v6.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 24, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 8, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v8.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v4.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v3.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[3:4], 24, v[1:2]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 8, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[11:12], 24, v[11:12]
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v13
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB39_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-TRUE16-NEXT: s_branch .LBB39_2
+; GFX11-TRUE16-NEXT: .LBB39_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v9, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s14 :: v_dual_mov_b32 v7, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s13 :: v_dual_mov_b32 v5, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s11 :: v_dual_mov_b32 v11, s6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s4 :: v_dual_mov_b32 v4, s1
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v6bf16_to_v12i8_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s3, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s2, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s1, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s1, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s0, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s0, 8
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-FAKE16-NEXT: .LBB39_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s1, 0, s2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v12, 0x7fc07fc0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v0, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v2, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v0, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v10, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v8, v0, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v2, v9 :: v_dual_add_nc_u32 v3, v3, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v6, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v4, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v7, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 24, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[3:4], 24, v[1:2]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 8, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 8, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[11:12], 24, v[11:12]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v13
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB39_3:
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-FAKE16-NEXT: s_branch .LBB39_2
+; GFX11-FAKE16-NEXT: .LBB39_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v9, s12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s14 :: v_dual_mov_b32 v7, s8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s13 :: v_dual_mov_b32 v5, s9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s11 :: v_dual_mov_b32 v11, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s4 :: v_dual_mov_b32 v4, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -11712,89 +11995,169 @@ define inreg <6 x half> @bitcast_v6bf16_to_v6f16_scalar(<6 x bfloat> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v6bf16_to_v6f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s3, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_4
-; GFX11-NEXT: .LBB49_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s1
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s4, 0, s2
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s2
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v6, v2
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s3
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v7
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_bfe_u32 v10, v3, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v1, v8, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v1, v4, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v6, v9 :: v_dual_add_nc_u32 v1, v1, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v10
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_bfe_u32 v9, v5, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v9, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v8, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v9
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v6, v10, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v7
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v0, v0, 16, v6
-; GFX11-NEXT: v_lshl_or_b32 v1, v3, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_lshl_or_b32 v2, v4, 16, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB49_3:
-; GFX11-NEXT: s_branch .LBB49_2
-; GFX11-NEXT: .LBB49_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v6bf16_to_v6f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-TRUE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s0, 0, s2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v10, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v8, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v10, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v4.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v3.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v6.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB49_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB49_2
+; GFX11-TRUE16-NEXT: .LBB49_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v6bf16_to_v6f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-FAKE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s4, 0, s2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v6, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v6, v9 :: v_dual_add_nc_u32 v1, v1, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v9, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v8, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v6, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v0, 16, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v4, 16, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB49_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB49_2
+; GFX11-FAKE16-NEXT: .LBB49_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -12306,64 +12669,57 @@ define <6 x i16> @bitcast_v6bf16_to_v6i16(<6 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, 0
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v0.l
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v3
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v1.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v4
-; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add_f32_e32 v9, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v2.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_add3_u32 v4, v4, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v9, 16, 1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc_lo
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v9
-; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v3, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v6
-; GFX11-TRUE16-NEXT: v_add3_u32 v6, v10, v9, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v1, 16, 1
-; GFX11-TRUE16-NEXT: v_add3_u32 v9, v11, v3, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc_lo
-; GFX11-TRUE16-NEXT: v_bfe_u32 v12, v5, 16, 1
-; GFX11-TRUE16-NEXT: v_add3_u32 v7, v10, v1, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v3
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v9
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-TRUE16-NEXT: v_add3_u32 v11, v12, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v3, v9, v10 :: v_dual_add_f32 v0, 0x40c00000, v0
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 0x40c00000, v1 :: v_dual_add_f32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v0, 16, 1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v11, v12, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
-; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v0, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v7, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v0
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v1.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v8, v2, vcc_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v5.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v6
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v0, 16, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v1, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v5, 16, v4
+; GFX11-TRUE16-NEXT: v_add3_u32 v2, v8, v0, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v4.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v8, v9, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v2, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v8, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v7, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v5.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.h
; GFX11-TRUE16-NEXT: .LBB52_2: ; %end
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -12651,80 +13007,151 @@ define inreg <6 x i16> @bitcast_v6bf16_to_v6i16_scalar(<6 x bfloat> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v6bf16_to_v6i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s3, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_4
-; GFX11-NEXT: .LBB53_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s1
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s3
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s2
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s2
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_bfe_u32 v10, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v5
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v8 :: v_dual_add_nc_u32 v7, v7, v3
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v4, 0x7fff, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v10, v6
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v11 :: v_dual_add_nc_u32 v7, v7, v8
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v9, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v4, v10, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v4, v5
-; GFX11-NEXT: v_and_or_b32 v1, 0xffff0000, v3, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v7
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_3:
-; GFX11-NEXT: s_branch .LBB53_2
-; GFX11-NEXT: .LBB53_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v6bf16_to_v6i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-TRUE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s0, 0, s2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v4, v4, v9 :: v_dual_add_nc_u32 v9, v10, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v4.h
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v2, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v1, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v9, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v5.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v6.h
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB53_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB53_2
+; GFX11-TRUE16-NEXT: .LBB53_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v6bf16_to_v6i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-FAKE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s2
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v0, v2, v8 :: v_dual_add_nc_u32 v7, v7, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v4, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v10, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v2, v11 :: v_dual_add_nc_u32 v7, v7, v8
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v9, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v4, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v4, v5
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v3, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v7
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB53_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB53_2
+; GFX11-FAKE16-NEXT: .LBB53_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fold-binop-select.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fold-binop-select.ll
index e71bf15..e34aaf20 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fold-binop-select.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fold-binop-select.ll
@@ -136,7 +136,7 @@ define i32 @select_sdiv_lhs_opaque_const0_i32(i1 %cond) {
; GCN-NEXT: v_mov_b32_e32 v1, s4
; GCN-NEXT: v_cndmask_b32_e32 v0, 5, v1, vcc
; GCN-NEXT: v_sub_u32_e32 v1, vcc, 0, v0
-; GCN-NEXT: v_max_i32_e32 v1, v0, v1
+; GCN-NEXT: v_max_i32_e32 v1, v1, v0
; GCN-NEXT: v_cvt_f32_u32_e32 v2, v1
; GCN-NEXT: v_sub_u32_e32 v3, vcc, 0, v1
; GCN-NEXT: s_mov_b32 s4, 0xf4240
@@ -218,7 +218,7 @@ define i32 @select_sdiv_lhs_opaque_const1_i32(i1 %cond) {
; GCN-NEXT: v_mov_b32_e32 v1, s4
; GCN-NEXT: v_cndmask_b32_e64 v0, v1, 5, vcc
; GCN-NEXT: v_sub_u32_e32 v1, vcc, 0, v0
-; GCN-NEXT: v_max_i32_e32 v1, v0, v1
+; GCN-NEXT: v_max_i32_e32 v1, v1, v0
; GCN-NEXT: v_cvt_f32_u32_e32 v2, v1
; GCN-NEXT: v_sub_u32_e32 v3, vcc, 0, v1
; GCN-NEXT: s_mov_b32 s4, 0xf4240
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
index e27164c..948811e 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
@@ -6191,37 +6191,34 @@ define amdgpu_kernel void @sdiv_i32_pow2_shl_denom(ptr addrspace(1) %out, i32 %x
; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_lshl_b32 s3, 0x1000, s3
-; GFX6-NEXT: s_ashr_i32 s8, s3, 31
-; GFX6-NEXT: s_add_i32 s3, s3, s8
-; GFX6-NEXT: s_xor_b32 s3, s3, s8
-; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s3
-; GFX6-NEXT: s_sub_i32 s4, 0, s3
-; GFX6-NEXT: s_ashr_i32 s9, s2, 31
-; GFX6-NEXT: s_add_i32 s2, s2, s9
-; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0
-; GFX6-NEXT: s_xor_b32 s2, s2, s9
+; GFX6-NEXT: s_abs_i32 s8, s3
+; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s8
+; GFX6-NEXT: s_sub_i32 s4, 0, s8
+; GFX6-NEXT: s_abs_i32 s9, s2
; GFX6-NEXT: s_mov_b32 s5, s1
+; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT: v_mul_lo_u32 v1, s4, v0
; GFX6-NEXT: s_mov_b32 s4, s0
; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v0, s2, v0
+; GFX6-NEXT: v_mul_hi_u32 v0, s9, v0
; GFX6-NEXT: v_readfirstlane_b32 s0, v0
-; GFX6-NEXT: s_mul_i32 s0, s0, s3
-; GFX6-NEXT: s_sub_i32 s0, s2, s0
-; GFX6-NEXT: s_sub_i32 s1, s0, s3
+; GFX6-NEXT: s_mul_i32 s0, s0, s8
+; GFX6-NEXT: s_sub_i32 s0, s9, s0
+; GFX6-NEXT: s_sub_i32 s1, s0, s8
; GFX6-NEXT: v_add_i32_e32 v1, vcc, 1, v0
-; GFX6-NEXT: s_cmp_ge_u32 s0, s3
+; GFX6-NEXT: s_cmp_ge_u32 s0, s8
; GFX6-NEXT: s_cselect_b64 vcc, -1, 0
; GFX6-NEXT: s_cselect_b32 s0, s1, s0
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
; GFX6-NEXT: v_add_i32_e32 v1, vcc, 1, v0
-; GFX6-NEXT: s_cmp_ge_u32 s0, s3
+; GFX6-NEXT: s_cmp_ge_u32 s0, s8
; GFX6-NEXT: s_cselect_b64 vcc, -1, 0
+; GFX6-NEXT: s_xor_b32 s0, s2, s3
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GFX6-NEXT: s_xor_b32 s0, s9, s8
+; GFX6-NEXT: s_ashr_i32 s0, s0, 31
; GFX6-NEXT: v_xor_b32_e32 v0, s0, v0
; GFX6-NEXT: v_subrev_i32_e32 v0, vcc, s0, v0
; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
@@ -6233,35 +6230,32 @@ define amdgpu_kernel void @sdiv_i32_pow2_shl_denom(ptr addrspace(1) %out, i32 %x
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b32 s3, 0x1000, s3
-; GFX9-NEXT: s_ashr_i32 s4, s3, 31
-; GFX9-NEXT: s_add_i32 s3, s3, s4
-; GFX9-NEXT: s_xor_b32 s3, s3, s4
-; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s3
-; GFX9-NEXT: s_sub_i32 s6, 0, s3
-; GFX9-NEXT: s_ashr_i32 s5, s2, 31
-; GFX9-NEXT: s_add_i32 s2, s2, s5
+; GFX9-NEXT: s_abs_i32 s4, s3
+; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s4
+; GFX9-NEXT: s_sub_i32 s6, 0, s4
+; GFX9-NEXT: s_abs_i32 s5, s2
; GFX9-NEXT: v_rcp_iflag_f32_e32 v0, v0
-; GFX9-NEXT: s_xor_b32 s2, s2, s5
; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT: v_readfirstlane_b32 s7, v0
; GFX9-NEXT: s_mul_i32 s6, s6, s7
; GFX9-NEXT: s_mul_hi_u32 s6, s7, s6
; GFX9-NEXT: s_add_i32 s7, s7, s6
-; GFX9-NEXT: s_mul_hi_u32 s6, s2, s7
-; GFX9-NEXT: s_mul_i32 s8, s6, s3
-; GFX9-NEXT: s_sub_i32 s2, s2, s8
+; GFX9-NEXT: s_mul_hi_u32 s6, s5, s7
+; GFX9-NEXT: s_mul_i32 s8, s6, s4
+; GFX9-NEXT: s_sub_i32 s5, s5, s8
; GFX9-NEXT: s_add_i32 s7, s6, 1
-; GFX9-NEXT: s_sub_i32 s8, s2, s3
-; GFX9-NEXT: s_cmp_ge_u32 s2, s3
+; GFX9-NEXT: s_sub_i32 s8, s5, s4
+; GFX9-NEXT: s_cmp_ge_u32 s5, s4
; GFX9-NEXT: s_cselect_b32 s6, s7, s6
-; GFX9-NEXT: s_cselect_b32 s2, s8, s2
+; GFX9-NEXT: s_cselect_b32 s5, s8, s5
; GFX9-NEXT: s_add_i32 s7, s6, 1
-; GFX9-NEXT: s_cmp_ge_u32 s2, s3
-; GFX9-NEXT: s_cselect_b32 s2, s7, s6
-; GFX9-NEXT: s_xor_b32 s3, s5, s4
+; GFX9-NEXT: s_cmp_ge_u32 s5, s4
+; GFX9-NEXT: s_cselect_b32 s4, s7, s6
; GFX9-NEXT: s_xor_b32 s2, s2, s3
-; GFX9-NEXT: s_sub_i32 s2, s2, s3
+; GFX9-NEXT: s_ashr_i32 s2, s2, 31
+; GFX9-NEXT: s_xor_b32 s3, s4, s2
+; GFX9-NEXT: s_sub_i32 s2, s3, s2
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: global_store_dword v1, v0, s[0:1]
; GFX9-NEXT: s_endpgm
@@ -6706,38 +6700,37 @@ define amdgpu_kernel void @srem_i32_pow2_shl_denom(ptr addrspace(1) %out, i32 %x
; GFX6-LABEL: srem_i32_pow2_shl_denom:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX6-NEXT: s_mov_b32 s7, 0xf000
+; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_lshl_b32 s3, 0x1000, s3
-; GFX6-NEXT: s_ashr_i32 s4, s3, 31
-; GFX6-NEXT: s_add_i32 s3, s3, s4
-; GFX6-NEXT: s_xor_b32 s4, s3, s4
-; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s4
-; GFX6-NEXT: s_sub_i32 s3, 0, s4
-; GFX6-NEXT: s_ashr_i32 s5, s2, 31
-; GFX6-NEXT: s_add_i32 s2, s2, s5
+; GFX6-NEXT: s_abs_i32 s3, s3
+; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s3
+; GFX6-NEXT: s_sub_i32 s4, 0, s3
+; GFX6-NEXT: s_abs_i32 s8, s2
+; GFX6-NEXT: s_mov_b32 s5, s1
; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0
-; GFX6-NEXT: s_xor_b32 s6, s2, s5
-; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GFX6-NEXT: v_mul_lo_u32 v1, s3, v0
-; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: v_mul_lo_u32 v1, s4, v0
+; GFX6-NEXT: s_mov_b32 s4, s0
; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v0, s6, v0
-; GFX6-NEXT: v_readfirstlane_b32 s7, v0
-; GFX6-NEXT: s_mul_i32 s7, s7, s4
-; GFX6-NEXT: s_sub_i32 s6, s6, s7
-; GFX6-NEXT: s_sub_i32 s7, s6, s4
-; GFX6-NEXT: s_cmp_ge_u32 s6, s4
-; GFX6-NEXT: s_cselect_b32 s6, s7, s6
-; GFX6-NEXT: s_sub_i32 s7, s6, s4
-; GFX6-NEXT: s_cmp_ge_u32 s6, s4
-; GFX6-NEXT: s_cselect_b32 s4, s7, s6
-; GFX6-NEXT: s_xor_b32 s4, s4, s5
-; GFX6-NEXT: s_sub_i32 s4, s4, s5
-; GFX6-NEXT: v_mov_b32_e32 v0, s4
-; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX6-NEXT: v_mul_hi_u32 v0, s8, v0
+; GFX6-NEXT: v_readfirstlane_b32 s0, v0
+; GFX6-NEXT: s_mul_i32 s0, s0, s3
+; GFX6-NEXT: s_sub_i32 s0, s8, s0
+; GFX6-NEXT: s_sub_i32 s1, s0, s3
+; GFX6-NEXT: s_cmp_ge_u32 s0, s3
+; GFX6-NEXT: s_cselect_b32 s0, s1, s0
+; GFX6-NEXT: s_sub_i32 s1, s0, s3
+; GFX6-NEXT: s_cmp_ge_u32 s0, s3
+; GFX6-NEXT: s_cselect_b32 s0, s1, s0
+; GFX6-NEXT: s_ashr_i32 s1, s2, 31
+; GFX6-NEXT: s_xor_b32 s0, s0, s1
+; GFX6-NEXT: s_sub_i32 s0, s0, s1
+; GFX6-NEXT: v_mov_b32_e32 v0, s0
+; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: srem_i32_pow2_shl_denom:
@@ -6746,32 +6739,29 @@ define amdgpu_kernel void @srem_i32_pow2_shl_denom(ptr addrspace(1) %out, i32 %x
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b32 s3, 0x1000, s3
-; GFX9-NEXT: s_ashr_i32 s4, s3, 31
-; GFX9-NEXT: s_add_i32 s3, s3, s4
-; GFX9-NEXT: s_xor_b32 s3, s3, s4
+; GFX9-NEXT: s_abs_i32 s3, s3
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s3
; GFX9-NEXT: s_sub_i32 s5, 0, s3
-; GFX9-NEXT: s_ashr_i32 s4, s2, 31
-; GFX9-NEXT: s_add_i32 s2, s2, s4
+; GFX9-NEXT: s_abs_i32 s4, s2
; GFX9-NEXT: v_rcp_iflag_f32_e32 v0, v0
-; GFX9-NEXT: s_xor_b32 s2, s2, s4
; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT: v_readfirstlane_b32 s6, v0
; GFX9-NEXT: s_mul_i32 s5, s5, s6
; GFX9-NEXT: s_mul_hi_u32 s5, s6, s5
; GFX9-NEXT: s_add_i32 s6, s6, s5
-; GFX9-NEXT: s_mul_hi_u32 s5, s2, s6
+; GFX9-NEXT: s_mul_hi_u32 s5, s4, s6
; GFX9-NEXT: s_mul_i32 s5, s5, s3
-; GFX9-NEXT: s_sub_i32 s2, s2, s5
-; GFX9-NEXT: s_sub_i32 s5, s2, s3
-; GFX9-NEXT: s_cmp_ge_u32 s2, s3
-; GFX9-NEXT: s_cselect_b32 s2, s5, s2
-; GFX9-NEXT: s_sub_i32 s5, s2, s3
-; GFX9-NEXT: s_cmp_ge_u32 s2, s3
-; GFX9-NEXT: s_cselect_b32 s2, s5, s2
-; GFX9-NEXT: s_xor_b32 s2, s2, s4
-; GFX9-NEXT: s_sub_i32 s2, s2, s4
+; GFX9-NEXT: s_sub_i32 s4, s4, s5
+; GFX9-NEXT: s_sub_i32 s5, s4, s3
+; GFX9-NEXT: s_cmp_ge_u32 s4, s3
+; GFX9-NEXT: s_cselect_b32 s4, s5, s4
+; GFX9-NEXT: s_sub_i32 s5, s4, s3
+; GFX9-NEXT: s_cmp_ge_u32 s4, s3
+; GFX9-NEXT: s_cselect_b32 s3, s5, s4
+; GFX9-NEXT: s_ashr_i32 s2, s2, 31
+; GFX9-NEXT: s_xor_b32 s3, s3, s2
+; GFX9-NEXT: s_sub_i32 s2, s3, s2
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: global_store_dword v1, v0, s[0:1]
; GFX9-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/build-vector-packed-partial-undef.ll b/llvm/test/CodeGen/AMDGPU/build-vector-packed-partial-undef.ll
index 861621b..c1b8bc6 100644
--- a/llvm/test/CodeGen/AMDGPU/build-vector-packed-partial-undef.ll
+++ b/llvm/test/CodeGen/AMDGPU/build-vector-packed-partial-undef.ll
@@ -410,26 +410,14 @@ define void @undef_lo2_v4i16(<2 x i16> %arg0) {
; GFX11-FAKE16-NEXT: ;;#ASMEND
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-TRUE16-SDAG-LABEL: undef_lo2_v4i16:
-; GFX11-TRUE16-SDAG: ; %bb.0:
-; GFX11-TRUE16-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-SDAG-NEXT: v_mov_b16_e32 v1.l, v0.h
-; GFX11-TRUE16-SDAG-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-TRUE16-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-SDAG-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; GFX11-TRUE16-SDAG-NEXT: ;;#ASMSTART
-; GFX11-TRUE16-SDAG-NEXT: ; use v[0:1]
-; GFX11-TRUE16-SDAG-NEXT: ;;#ASMEND
-; GFX11-TRUE16-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-TRUE16-GISEL-LABEL: undef_lo2_v4i16:
-; GFX11-TRUE16-GISEL: ; %bb.0:
-; GFX11-TRUE16-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-GISEL-NEXT: v_mov_b16_e32 v0.l, v0.h
-; GFX11-TRUE16-GISEL-NEXT: ;;#ASMSTART
-; GFX11-TRUE16-GISEL-NEXT: ; use v[0:1]
-; GFX11-TRUE16-GISEL-NEXT: ;;#ASMEND
-; GFX11-TRUE16-GISEL-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: undef_lo2_v4i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11-TRUE16-NEXT: ;;#ASMSTART
+; GFX11-TRUE16-NEXT: ; use v[0:1]
+; GFX11-TRUE16-NEXT: ;;#ASMEND
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
%undef.lo = shufflevector <2 x i16> %arg0, <2 x i16> poison, <4 x i32> <i32 1, i32 1, i32 2, i32 3>
call void asm sideeffect "; use $0", "v"(<4 x i16> %undef.lo);
ret void
diff --git a/llvm/test/CodeGen/AMDGPU/bypass-div.ll b/llvm/test/CodeGen/AMDGPU/bypass-div.ll
index 3cf70c4..d7d697e 100644
--- a/llvm/test/CodeGen/AMDGPU/bypass-div.ll
+++ b/llvm/test/CodeGen/AMDGPU/bypass-div.ll
@@ -576,11 +576,11 @@ define i32 @sdiv32(i32 %a, i32 %b) {
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_sub_u32_e32 v2, 0, v1
-; GFX9-NEXT: v_max_i32_e32 v2, v1, v2
+; GFX9-NEXT: v_max_i32_e32 v2, v2, v1
; GFX9-NEXT: v_cvt_f32_u32_e32 v3, v2
; GFX9-NEXT: v_sub_u32_e32 v4, 0, v2
; GFX9-NEXT: v_sub_u32_e32 v5, 0, v0
-; GFX9-NEXT: v_max_i32_e32 v5, v0, v5
+; GFX9-NEXT: v_max_i32_e32 v5, v5, v0
; GFX9-NEXT: v_rcp_iflag_f32_e32 v3, v3
; GFX9-NEXT: v_xor_b32_e32 v0, v0, v1
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v0
@@ -640,11 +640,11 @@ define i32 @srem32(i32 %a, i32 %b) {
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_sub_u32_e32 v2, 0, v1
-; GFX9-NEXT: v_max_i32_e32 v1, v1, v2
+; GFX9-NEXT: v_max_i32_e32 v1, v2, v1
; GFX9-NEXT: v_cvt_f32_u32_e32 v2, v1
; GFX9-NEXT: v_sub_u32_e32 v3, 0, v1
; GFX9-NEXT: v_sub_u32_e32 v4, 0, v0
-; GFX9-NEXT: v_max_i32_e32 v4, v0, v4
+; GFX9-NEXT: v_max_i32_e32 v4, v4, v0
; GFX9-NEXT: v_rcp_iflag_f32_e32 v2, v2
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v0
; GFX9-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2
diff --git a/llvm/test/CodeGen/AMDGPU/divergence-driven-buildvector.ll b/llvm/test/CodeGen/AMDGPU/divergence-driven-buildvector.ll
index 9c59b42..ab96dcf 100644
--- a/llvm/test/CodeGen/AMDGPU/divergence-driven-buildvector.ll
+++ b/llvm/test/CodeGen/AMDGPU/divergence-driven-buildvector.ll
@@ -563,10 +563,9 @@ define i32 @divergent_vec_i16_HH(i32 %a, i32 %b) {
; GFX11-TRUE16-LABEL: divergent_vec_i16_HH:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v1.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.h
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v1
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: divergent_vec_i16_HH:
diff --git a/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-f16-true16.mir b/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-f16-true16.mir
index 043bcc3..f64615d 100644
--- a/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-f16-true16.mir
+++ b/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-f16-true16.mir
@@ -264,3 +264,90 @@ body: |
$sgpr0 = COPY %16:sreg_32
SI_RETURN_TO_EPILOG $sgpr0
...
+
+---
+name: s_pack_ll_b32_b16
+body: |
+ bb.0:
+ ; GCN-LABEL: name: s_pack_ll_b32_b16
+ ; GCN: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+ ; GCN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 [[DEF]], implicit $exec
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vgpr_32 = REG_SEQUENCE [[V_MOV_B32_e32_]].lo16, %subreg.lo16, [[DEF1]].lo16, %subreg.hi16
+ %0:sreg_32 = IMPLICIT_DEF
+ %1:vgpr_32 = IMPLICIT_DEF
+ %2:sreg_32 = COPY %1:vgpr_32
+ %3:sreg_32 = S_PACK_LL_B32_B16 %0:sreg_32, %2:sreg_32, implicit-def dead $scc
+...
+
+---
+name: s_pack_lh_b32_b16
+body: |
+ bb.0:
+ ; GCN-LABEL: name: s_pack_lh_b32_b16
+ ; GCN: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+ ; GCN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 [[DEF]], implicit $exec
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vgpr_32 = REG_SEQUENCE [[V_MOV_B32_e32_]].lo16, %subreg.lo16, [[DEF1]].hi16, %subreg.hi16
+ %0:sreg_32 = IMPLICIT_DEF
+ %1:vgpr_32 = IMPLICIT_DEF
+ %2:sreg_32 = COPY %1:vgpr_32
+ %3:sreg_32 = S_PACK_LH_B32_B16 %0:sreg_32, %2:sreg_32, implicit-def dead $scc
+...
+
+---
+name: s_pack_hl_b32_b16
+body: |
+ bb.0:
+ ; GCN-LABEL: name: s_pack_hl_b32_b16
+ ; GCN: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+ ; GCN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 [[DEF]], implicit $exec
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vgpr_32 = REG_SEQUENCE [[V_MOV_B32_e32_]].hi16, %subreg.lo16, [[DEF1]].lo16, %subreg.hi16
+ %0:sreg_32 = IMPLICIT_DEF
+ %1:vgpr_32 = IMPLICIT_DEF
+ %2:sreg_32 = COPY %1:vgpr_32
+ %3:sreg_32 = S_PACK_HL_B32_B16 %0:sreg_32, %2:sreg_32, implicit-def dead $scc
+...
+
+---
+name: s_pack_hh_b32_b16
+body: |
+ bb.0:
+ ; GCN-LABEL: name: s_pack_hh_b32_b16
+ ; GCN: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+ ; GCN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 [[DEF]], implicit $exec
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vgpr_32 = REG_SEQUENCE [[V_MOV_B32_e32_]].hi16, %subreg.lo16, [[DEF1]].hi16, %subreg.hi16
+ %0:sreg_32 = IMPLICIT_DEF
+ %1:vgpr_32 = IMPLICIT_DEF
+ %2:sreg_32 = COPY %1:vgpr_32
+ %3:sreg_32 = S_PACK_HH_B32_B16 %0:sreg_32, %2:sreg_32, implicit-def dead $scc
+...
+
+---
+name: s_pack_ll_b32_b16_use_SALU16
+body: |
+ bb.0:
+ ; GCN-LABEL: name: s_pack_ll_b32_b16_use_SALU16
+ ; GCN: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+ ; GCN-NEXT: [[V_FMAC_F16_t16_e64_:%[0-9]+]]:vgpr_16 = V_FMAC_F16_t16_e64 0, [[DEF]].lo16, 0, [[DEF]].lo16, 0, [[DEF]].lo16, 0, 0, 0, implicit $mode, implicit $exec
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vgpr_32 = REG_SEQUENCE [[V_FMAC_F16_t16_e64_]], %subreg.lo16, [[DEF]].lo16, %subreg.hi16
+ %0:vgpr_32 = IMPLICIT_DEF
+ %1:sreg_32 = COPY %0:vgpr_32
+ %2:sreg_32 = S_FMAC_F16 %1:sreg_32, %1:sreg_32, %1:sreg_32, implicit $mode
+ %3:sreg_32 = S_PACK_LL_B32_B16 %2:sreg_32, %1:sreg_32, implicit-def dead $scc
+...
+
+---
+name: s_pack_ll_b32_b16_use_imm
+body: |
+ bb.0:
+ ; GCN-LABEL: name: s_pack_ll_b32_b16_use_imm
+ ; GCN: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+ ; GCN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vgpr_32 = REG_SEQUENCE [[V_MOV_B32_e32_]].lo16, %subreg.lo16, [[DEF]].lo16, %subreg.hi16
+ %0:vgpr_32 = IMPLICIT_DEF
+ %1:sreg_32 = COPY %0:vgpr_32
+ %2:sreg_32 = S_PACK_LL_B32_B16 1, %1:sreg_32, implicit-def dead $scc
+...
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll b/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll
index 76da0aa..10c60df 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll
@@ -478,41 +478,76 @@ define amdgpu_kernel void @s_fneg_fabs_v2bf16_non_bc_src(ptr addrspace(1) %out,
; GFX9-NEXT: global_store_dword v0, v1, s[0:1]
; GFX9-NEXT: s_endpgm
;
-; GFX11-LABEL: s_fneg_fabs_v2bf16_non_bc_src:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_load_b32 s0, s[4:5], 0x8
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v0, s1, 1.0
-; GFX11-NEXT: v_add_f32_e64 v1, s0, 2.0
-; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v3, v5 :: v_dual_and_b32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; GFX11-NEXT: v_mov_b32_e32 v1, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_or_b32_e32 v0, 0x80008000, v0
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: global_store_b32 v1, v0, s[0:1]
-; GFX11-NEXT: s_endpgm
+; GFX11-TRUE16-LABEL: s_fneg_fabs_v2bf16_non_bc_src:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x8
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, s1, 2.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, s0, 1.0
+; GFX11-TRUE16-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, 0x80008000, v1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX11-TRUE16-NEXT: s_endpgm
+;
+; GFX11-FAKE16-LABEL: s_fneg_fabs_v2bf16_non_bc_src:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x8
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, s1, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, s0, 2.0
+; GFX11-FAKE16-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v3, v5 :: v_dual_and_b32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, 0x80008000, v0
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX11-FAKE16-NEXT: s_endpgm
%add = fadd <2 x bfloat> %in, <bfloat 1.0, bfloat 2.0>
%fabs = call <2 x bfloat> @llvm.fabs.v2bf16(<2 x bfloat> %add)
%fneg.fabs = fsub <2 x bfloat> <bfloat -0.0, bfloat -0.0>, %fabs
@@ -752,42 +787,78 @@ define amdgpu_kernel void @fold_user_fneg_fabs_v2bf16(ptr addrspace(1) %out, <2
; GFX9-NEXT: global_store_dword v0, v1, s[0:1]
; GFX9-NEXT: s_endpgm
;
-; GFX11-LABEL: fold_user_fneg_fabs_v2bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_load_b32 s0, s[4:5], 0x8
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_and_b32 s1, s0, 0x7fff
-; GFX11-NEXT: s_lshr_b32 s0, s0, 16
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: s_and_b32 s0, s0, 0x7fff
-; GFX11-NEXT: v_mul_f32_e64 v0, s1, -4.0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_mul_f32_e64 v1, s0, -4.0
-; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_mov_b32_e32 v2, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v3, v5 :: v_dual_and_b32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: global_store_b32 v2, v0, s[0:1]
-; GFX11-NEXT: s_endpgm
+; GFX11-TRUE16-LABEL: fold_user_fneg_fabs_v2bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x8
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_lshr_b32 s1, s0, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s1, 0x7fff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_mul_f32_e64 v1, s0, -4.0
+; GFX11-TRUE16-NEXT: v_mul_f32_e64 v0, s1, -4.0
+; GFX11-TRUE16-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v0.l
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_store_b32 v2, v1, s[0:1]
+; GFX11-TRUE16-NEXT: s_endpgm
+;
+; GFX11-FAKE16-LABEL: fold_user_fneg_fabs_v2bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x8
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s0, 0x7fff
+; GFX11-FAKE16-NEXT: s_lshr_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0x7fff
+; GFX11-FAKE16-NEXT: v_mul_f32_e64 v0, s1, -4.0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_mul_f32_e64 v1, s0, -4.0
+; GFX11-FAKE16-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v3, v5 :: v_dual_and_b32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_store_b32 v2, v0, s[0:1]
+; GFX11-FAKE16-NEXT: s_endpgm
%fabs = call <2 x bfloat> @llvm.fabs.v2bf16(<2 x bfloat> %in)
%fneg.fabs = fsub <2 x bfloat> <bfloat -0.0, bfloat -0.0>, %fabs
%mul = fmul <2 x bfloat> %fneg.fabs, <bfloat 4.0, bfloat 4.0>
@@ -975,46 +1046,88 @@ define amdgpu_kernel void @s_fneg_multi_use_fabs_foldable_neg_v2bf16(ptr addrspa
; GFX9-NEXT: global_store_dword v0, v1, s[2:3]
; GFX9-NEXT: s_endpgm
;
-; GFX11-LABEL: s_fneg_multi_use_fabs_foldable_neg_v2bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x10
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_and_b32 s0, s6, 0x7fff
-; GFX11-NEXT: s_lshr_b32 s1, s6, 16
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: s_and_b32 s1, s1, 0x7fff
-; GFX11-NEXT: v_mul_f32_e64 v0, s0, -4.0
-; GFX11-NEXT: s_lshl_b32 s0, s1, 16
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_mul_f32_e64 v1, s0, -4.0
-; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v1
-; GFX11-NEXT: s_and_b32 s4, s6, 0x7fff7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_mov_b32_e32 v2, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
-; GFX11-NEXT: v_dual_mov_b32 v3, s4 :: v_dual_and_b32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_clause 0x1
-; GFX11-NEXT: global_store_b32 v2, v3, s[0:1]
-; GFX11-NEXT: global_store_b32 v2, v0, s[2:3]
-; GFX11-NEXT: s_endpgm
+; GFX11-TRUE16-LABEL: s_fneg_multi_use_fabs_foldable_neg_v2bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x10
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_lshr_b32 s0, s6, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s6, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_mul_f32_e64 v0, s0, -4.0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_mul_f32_e64 v1, s0, -4.0
+; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s6, 0x7fff7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v0.l
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_clause 0x1
+; GFX11-TRUE16-NEXT: global_store_b32 v2, v3, s[0:1]
+; GFX11-TRUE16-NEXT: global_store_b32 v2, v1, s[2:3]
+; GFX11-TRUE16-NEXT: s_endpgm
+;
+; GFX11-FAKE16-LABEL: s_fneg_multi_use_fabs_foldable_neg_v2bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x10
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s6, 0x7fff
+; GFX11-FAKE16-NEXT: s_lshr_b32 s1, s6, 16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s1, 0x7fff
+; GFX11-FAKE16-NEXT: v_mul_f32_e64 v0, s0, -4.0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_mul_f32_e64 v1, s0, -4.0
+; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s6, 0x7fff7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s4 :: v_dual_and_b32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_clause 0x1
+; GFX11-FAKE16-NEXT: global_store_b32 v2, v3, s[0:1]
+; GFX11-FAKE16-NEXT: global_store_b32 v2, v0, s[2:3]
+; GFX11-FAKE16-NEXT: s_endpgm
%fabs = call <2 x bfloat> @llvm.fabs.v2bf16(<2 x bfloat> %in)
%fneg = fsub <2 x bfloat> <bfloat -0.0, bfloat -0.0>, %fabs
%mul = fmul <2 x bfloat> %fneg, <bfloat 4.0, bfloat 4.0>
diff --git a/llvm/test/CodeGen/AMDGPU/fneg.bf16.ll b/llvm/test/CodeGen/AMDGPU/fneg.bf16.ll
index 98044a7..84b904f 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg.bf16.ll
@@ -712,47 +712,88 @@ define amdgpu_kernel void @v_fneg_fold_v2bf16(ptr addrspace(1) %out, ptr addrspa
; GFX9-NEXT: global_store_dword v0, v1, s[0:1]
; GFX9-NEXT: s_endpgm
;
-; GFX11-LABEL: v_fneg_fold_v2bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GFX11-NEXT: v_mov_b32_e32 v0, 0
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v1, v0, s[2:3]
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v1
-; GFX11-NEXT: v_xor_b32_e32 v3, 0x8000, v1
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_xor_b32_e32 v2, 0x8000, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mul_f32 v3, v3, v4 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX11-NEXT: v_mul_f32_e32 v1, v2, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v2, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_lshl_or_b32 v1, v1, 16, v2
-; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
-; GFX11-NEXT: s_endpgm
+; GFX11-TRUE16-LABEL: v_fneg_fold_v2bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v1, v0, s[2:3]
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: v_xor_b32_e32 v3, 0x8000, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_xor_b32_e32 v2, 0x8000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mul_f32 v1, v3, v1 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_mul_f32_e32 v2, v2, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
+; GFX11-TRUE16-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX11-TRUE16-NEXT: s_endpgm
+;
+; GFX11-FAKE16-LABEL: v_fneg_fold_v2bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v1, v0, s[2:3]
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_xor_b32_e32 v3, 0x8000, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_xor_b32_e32 v2, 0x8000, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_mul_f32 v3, v3, v4 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_mul_f32_e32 v1, v2, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v1, 16, v2
+; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX11-FAKE16-NEXT: s_endpgm
%val = load <2 x bfloat>, ptr addrspace(1) %in
%fsub = fsub <2 x bfloat> <bfloat -0.0, bfloat -0.0>, %val
%fmul = fmul <2 x bfloat> %fsub, %val
diff --git a/llvm/test/CodeGen/AMDGPU/fptosi.f16.ll b/llvm/test/CodeGen/AMDGPU/fptosi.f16.ll
index f048dc5..a43292d 100644
--- a/llvm/test/CodeGen/AMDGPU/fptosi.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptosi.f16.ll
@@ -330,11 +330,8 @@ define amdgpu_kernel void @fptosi_v2f16_to_v2i16(
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; GFX11-TRUE16-NEXT: v_cvt_i16_f16_e32 v0.l, v0.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cvt_i16_f16_e32 v1.l, v1.l
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cvt_i16_f16_e32 v0.h, v1.l
; GFX11-TRUE16-NEXT: buffer_store_b32 v0, off, s[4:7], 0
; GFX11-TRUE16-NEXT: s_endpgm
;
diff --git a/llvm/test/CodeGen/AMDGPU/fptoui.f16.ll b/llvm/test/CodeGen/AMDGPU/fptoui.f16.ll
index 96abb3a..96cb621 100644
--- a/llvm/test/CodeGen/AMDGPU/fptoui.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptoui.f16.ll
@@ -329,11 +329,8 @@ define amdgpu_kernel void @fptoui_v2f16_to_v2i16(
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; GFX11-TRUE16-NEXT: v_cvt_u16_f16_e32 v0.l, v0.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cvt_u16_f16_e32 v1.l, v1.l
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cvt_u16_f16_e32 v0.h, v1.l
; GFX11-TRUE16-NEXT: buffer_store_b32 v0, off, s[4:7], 0
; GFX11-TRUE16-NEXT: s_endpgm
;
diff --git a/llvm/test/CodeGen/AMDGPU/frem.ll b/llvm/test/CodeGen/AMDGPU/frem.ll
index 415828f..35d178c 100644
--- a/llvm/test/CodeGen/AMDGPU/frem.ll
+++ b/llvm/test/CodeGen/AMDGPU/frem.ll
@@ -5972,16 +5972,14 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-TRUE16-NEXT: .LBB9_16: ; %Flow54
; GFX11-TRUE16-NEXT: v_cmp_lg_f16_e32 vcc_lo, 0, v1.l
; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, 0x7c00, |v0.l|
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, 0
; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_lg_f16_e32 vcc_lo, 0, v4.l
; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x7e00, v2.l, s2
; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, 0x7c00, |v3.l|
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0xffff, v0
; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, vcc_lo
-; GFX11-TRUE16-NEXT: v_cndmask_b16 v2.l, 0x7e00, v7.l, s2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v2, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x7e00, v7.l, s2
; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-TRUE16-NEXT: s_endpgm
;
@@ -6422,19 +6420,16 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-TRUE16-NEXT: s_cselect_b32 s3, -1, 0
; GFX1150-TRUE16-NEXT: s_cmp_nge_f16 s2, 0x7c00
; GFX1150-TRUE16-NEXT: s_cselect_b32 s2, -1, 0
-; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1150-TRUE16-NEXT: s_and_b32 s2, s2, s3
; GFX1150-TRUE16-NEXT: s_cmp_lg_f16 s5, 0
; GFX1150-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x7e00, v0.l, s2
; GFX1150-TRUE16-NEXT: s_cselect_b32 s2, -1, 0
; GFX1150-TRUE16-NEXT: s_cmp_nge_f16 s4, 0x7c00
-; GFX1150-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX1150-TRUE16-NEXT: s_cselect_b32 s3, -1, 0
; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1150-TRUE16-NEXT: s_and_b32 s2, s3, s2
-; GFX1150-TRUE16-NEXT: v_cndmask_b16 v1.l, 0x7e00, v1.l, s2
-; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1150-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX1150-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x7e00, v1.l, s2
; GFX1150-TRUE16-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX1150-TRUE16-NEXT: s_endpgm
;
@@ -6902,20 +6897,17 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-TRUE16-NEXT: s_cselect_b32 s3, -1, 0
; GFX1200-TRUE16-NEXT: s_cmp_nge_f16 s2, 0x7c00
; GFX1200-TRUE16-NEXT: s_cselect_b32 s2, -1, 0
-; GFX1200-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX1200-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1200-TRUE16-NEXT: s_and_b32 s2, s2, s3
; GFX1200-TRUE16-NEXT: s_cmp_lg_f16 s5, 0
; GFX1200-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x7e00, v0.l, s2
; GFX1200-TRUE16-NEXT: s_cselect_b32 s2, -1, 0
; GFX1200-TRUE16-NEXT: s_cmp_nge_f16 s4, 0x7c00
-; GFX1200-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX1200-TRUE16-NEXT: s_cselect_b32 s3, -1, 0
; GFX1200-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX1200-TRUE16-NEXT: s_and_b32 s2, s3, s2
; GFX1200-TRUE16-NEXT: s_wait_alu 0xfffe
-; GFX1200-TRUE16-NEXT: v_cndmask_b16 v1.l, 0x7e00, v1.l, s2
-; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1200-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX1200-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x7e00, v1.l, s2
; GFX1200-TRUE16-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX1200-TRUE16-NEXT: s_endpgm
;
@@ -9346,29 +9338,23 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-TRUE16-NEXT: .LBB10_32: ; %Flow124
; GFX11-TRUE16-NEXT: v_cmp_lg_f16_e32 vcc_lo, 0, v2.l
; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, 0x7c00, |v0.l|
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, 0
; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_lg_f16_e32 vcc_lo, 0, v6.l
; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x7e00, v4.l, s2
; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, 0x7c00, |v5.l|
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_lg_f16_e32 vcc_lo, 0, v3.l
-; GFX11-TRUE16-NEXT: v_cndmask_b16 v2.l, 0x7e00, v7.l, s2
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x7e00, v7.l, s2
; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, 0x7c00, |v1.l|
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v2, 16, v0
; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_lg_f16_e32 vcc_lo, 0, v10.l
; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, 0x7e00, v8.l, s2
; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, 0x7c00, |v9.l|
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_cndmask_b16 v4.l, 0x7e00, v11.l, s2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v4, 16, v1
-; GFX11-TRUE16-NEXT: global_store_b64 v3, v[0:1], s[0:1]
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, 0x7e00, v11.l, s2
+; GFX11-TRUE16-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-TRUE16-NEXT: s_endpgm
;
; GFX11-FAKE16-LABEL: frem_v4f16:
@@ -10209,21 +10195,19 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-TRUE16-NEXT: s_cselect_b32 s4, -1, 0
; GFX1150-TRUE16-NEXT: s_cmp_nge_f16 s3, 0x7c00
; GFX1150-TRUE16-NEXT: s_cselect_b32 s3, -1, 0
-; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1150-TRUE16-NEXT: s_and_b32 s3, s3, s4
; GFX1150-TRUE16-NEXT: s_cmp_lg_f16 s6, 0
; GFX1150-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x7e00, v0.l, s3
; GFX1150-TRUE16-NEXT: s_cselect_b32 s3, -1, 0
; GFX1150-TRUE16-NEXT: s_cmp_nge_f16 s5, 0x7c00
-; GFX1150-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX1150-TRUE16-NEXT: s_cselect_b32 s4, -1, 0
-; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1150-TRUE16-NEXT: s_and_b32 s3, s4, s3
; GFX1150-TRUE16-NEXT: s_cmp_lg_f16 s2, 0
-; GFX1150-TRUE16-NEXT: v_cndmask_b16 v4.l, 0x7e00, v1.l, s3
+; GFX1150-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x7e00, v1.l, s3
; GFX1150-TRUE16-NEXT: s_cselect_b32 s2, -1, 0
; GFX1150-TRUE16-NEXT: s_cmp_nge_f16 s8, 0x7c00
-; GFX1150-TRUE16-NEXT: v_lshl_or_b32 v0, v4, 16, v0
; GFX1150-TRUE16-NEXT: s_cselect_b32 s3, -1, 0
; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1150-TRUE16-NEXT: s_and_b32 s2, s3, s2
@@ -10232,13 +10216,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v2, 0
; GFX1150-TRUE16-NEXT: s_cselect_b32 s2, -1, 0
; GFX1150-TRUE16-NEXT: s_cmp_nge_f16 s7, 0x7c00
-; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1150-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX1150-TRUE16-NEXT: s_cselect_b32 s3, -1, 0
+; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1150-TRUE16-NEXT: s_and_b32 s2, s3, s2
-; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1150-TRUE16-NEXT: v_cndmask_b16 v3.l, 0x7e00, v3.l, s2
-; GFX1150-TRUE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1
+; GFX1150-TRUE16-NEXT: v_cndmask_b16 v1.h, 0x7e00, v3.l, s2
; GFX1150-TRUE16-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX1150-TRUE16-NEXT: s_endpgm
;
@@ -11147,18 +11128,14 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x7e00, v0.l, s3
; GFX1200-TRUE16-NEXT: s_cselect_b32 s3, -1, 0
; GFX1200-TRUE16-NEXT: s_cmp_nge_f16 s5, 0x7c00
-; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1200-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX1200-TRUE16-NEXT: s_cselect_b32 s4, -1, 0
; GFX1200-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX1200-TRUE16-NEXT: s_and_b32 s3, s4, s3
; GFX1200-TRUE16-NEXT: s_cmp_lg_f16 s2, 0
; GFX1200-TRUE16-NEXT: s_wait_alu 0xfffe
-; GFX1200-TRUE16-NEXT: v_cndmask_b16 v4.l, 0x7e00, v1.l, s3
+; GFX1200-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x7e00, v1.l, s3
; GFX1200-TRUE16-NEXT: s_cselect_b32 s2, -1, 0
; GFX1200-TRUE16-NEXT: s_cmp_nge_f16 s8, 0x7c00
-; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1200-TRUE16-NEXT: v_lshl_or_b32 v0, v4, 16, v0
; GFX1200-TRUE16-NEXT: s_cselect_b32 s3, -1, 0
; GFX1200-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX1200-TRUE16-NEXT: s_and_b32 s2, s3, s2
@@ -11168,15 +11145,11 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-TRUE16-NEXT: v_mov_b32_e32 v2, 0
; GFX1200-TRUE16-NEXT: s_cselect_b32 s2, -1, 0
; GFX1200-TRUE16-NEXT: s_cmp_nge_f16 s7, 0x7c00
-; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX1200-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX1200-TRUE16-NEXT: s_cselect_b32 s3, -1, 0
; GFX1200-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX1200-TRUE16-NEXT: s_and_b32 s2, s3, s2
; GFX1200-TRUE16-NEXT: s_wait_alu 0xfffe
-; GFX1200-TRUE16-NEXT: v_cndmask_b16 v3.l, 0x7e00, v3.l, s2
-; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1200-TRUE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1
+; GFX1200-TRUE16-NEXT: v_cndmask_b16 v1.h, 0x7e00, v3.l, s2
; GFX1200-TRUE16-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX1200-TRUE16-NEXT: s_endpgm
;
diff --git a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll
index 792d7db..76016e4 100644
--- a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll
@@ -850,15 +850,13 @@ define amdgpu_kernel void @v_insertelement_v2i16_0_reghi(ptr addrspace(1) %out,
; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX11-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x10
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: global_load_b32 v1, v0, s[2:3]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e64 v2, 16, s4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s4
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v1.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v1, 16, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.h
; GFX11-TRUE16-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-TRUE16-NEXT: s_endpgm
;
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll
index a2c1545..447a5f2 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll
@@ -361,12 +361,10 @@ define amdgpu_kernel void @raw_atomic_buffer_load_v4i16(<4 x i32> %addr) {
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; GFX11-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
; GFX11-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
@@ -444,12 +442,10 @@ define amdgpu_kernel void @raw_atomic_buffer_load_v4i16(<4 x i32> %addr) {
; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
; GFX12-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT
; GFX12-SDAG-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v2
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1
-; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
; GFX12-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; GFX12-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
; GFX12-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll
index 6f7c001..2e0e420 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll
@@ -361,12 +361,10 @@ define amdgpu_kernel void @raw_ptr_atomic_buffer_load_v4i16(ptr addrspace(8) %pt
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; GFX11-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
; GFX11-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
@@ -444,12 +442,10 @@ define amdgpu_kernel void @raw_ptr_atomic_buffer_load_v4i16(ptr addrspace(8) %pt
; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
; GFX12-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT
; GFX12-SDAG-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v2
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1
-; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
; GFX12-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; GFX12-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
; GFX12-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll
index 8896364..ebb33684 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll
@@ -455,12 +455,10 @@ define amdgpu_kernel void @struct_atomic_buffer_load_v4i16(<4 x i32> %addr, i32
; GFX11-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; GFX11-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
; GFX11-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
@@ -550,12 +548,10 @@ define amdgpu_kernel void @struct_atomic_buffer_load_v4i16(<4 x i32> %addr, i32
; GFX12-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
; GFX12-SDAG-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
; GFX12-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; GFX12-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
; GFX12-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll
index 23db247..40be567 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll
@@ -455,12 +455,10 @@ define amdgpu_kernel void @struct_ptr_atomic_buffer_load_v4i16(ptr addrspace(8)
; GFX11-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; GFX11-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
; GFX11-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
@@ -550,12 +548,10 @@ define amdgpu_kernel void @struct_ptr_atomic_buffer_load_v4i16(ptr addrspace(8)
; GFX12-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
; GFX12-SDAG-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
; GFX12-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; GFX12-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
; GFX12-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.maximum.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.maximum.f16.ll
index 92a2f54..068a989 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.maximum.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.maximum.f16.ll
@@ -1053,19 +1053,15 @@ define void @s_maximum_v2f16(<2 x half> inreg %src0, <2 x half> inreg %src1) {
; GFX11-TRUE16-LABEL: s_maximum_v2f16:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_cmp_o_f16_e64 s2, s0, s1
; GFX11-TRUE16-NEXT: v_pk_max_f16 v0, s0, s1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s1, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s0, s0, 16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: s_lshr_b32 s2, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s3, s0, 16
; GFX11-TRUE16-NEXT: v_cmp_o_f16_e64 s0, s0, s1
+; GFX11-TRUE16-NEXT: v_cmp_o_f16_e64 s1, s3, s2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x7e00, v0.l, s2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, 0x7e00, v1.l, s0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x7e00, v0.l, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x7e00, v1.l, s1
; GFX11-TRUE16-NEXT: ;;#ASMSTART
; GFX11-TRUE16-NEXT: ; use v0
; GFX11-TRUE16-NEXT: ;;#ASMEND
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.minimum.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.minimum.f16.ll
index 9e82b41..2482d10 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.minimum.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.minimum.f16.ll
@@ -866,19 +866,15 @@ define void @s_minimum_v2f16(<2 x half> inreg %src0, <2 x half> inreg %src1) {
; GFX11-TRUE16-LABEL: s_minimum_v2f16:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_cmp_o_f16_e64 s2, s0, s1
; GFX11-TRUE16-NEXT: v_pk_min_f16 v0, s0, s1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s1, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s0, s0, 16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: s_lshr_b32 s2, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s3, s0, 16
; GFX11-TRUE16-NEXT: v_cmp_o_f16_e64 s0, s0, s1
+; GFX11-TRUE16-NEXT: v_cmp_o_f16_e64 s1, s3, s2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x7e00, v0.l, s2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, 0x7e00, v1.l, s0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x7e00, v0.l, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x7e00, v1.l, s1
; GFX11-TRUE16-NEXT: ;;#ASMSTART
; GFX11-TRUE16-NEXT: ; use v0
; GFX11-TRUE16-NEXT: ;;#ASMEND
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.sqrt.bf16.ll b/llvm/test/CodeGen/AMDGPU/llvm.sqrt.bf16.ll
index dcf01f7..818dff4 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.sqrt.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.sqrt.bf16.ll
@@ -63,14 +63,10 @@ define amdgpu_kernel void @sqrt_v2bf16(ptr addrspace(1) %r, ptr addrspace(1) %a)
; GFX12-TRUE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
; GFX12-TRUE16-NEXT: s_mov_b32 s5, s1
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-TRUE16-NEXT: v_sqrt_bf16_e32 v1.l, v0.l
-; GFX12-TRUE16-NEXT: v_nop
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(TRANS32_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; GFX12-TRUE16-NEXT: v_sqrt_bf16_e32 v0.l, v0.l
-; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(TRANS32_DEP_1) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshl_or_b32 v0, v0, 16, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_sqrt_bf16_e32 v0.h, v1.l
; GFX12-TRUE16-NEXT: buffer_store_b32 v0, off, s[4:7], null
; GFX12-TRUE16-NEXT: s_endpgm
;
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll
index b534c2c..6f63384 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll
@@ -9604,11 +9604,11 @@ define amdgpu_kernel void @constant_zextload_v2i8_to_v2i16(ptr addrspace(1) %out
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
; GFX12-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v1
; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 8, v2
-; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v2
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
; GFX12-TRUE16-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX12-TRUE16-NEXT: s_endpgm
;
@@ -9738,11 +9738,11 @@ define amdgpu_kernel void @constant_sextload_v2i8_to_v2i16(ptr addrspace(1) %out
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
; GFX12-TRUE16-NEXT: v_bfe_i32 v2, v1, 0, 16
; GFX12-TRUE16-NEXT: v_bfe_i32 v1, v1, 0, 8
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 8, v2
-; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v2
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
; GFX12-TRUE16-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX12-TRUE16-NEXT: s_endpgm
;
diff --git a/llvm/test/CodeGen/AMDGPU/sdiv.ll b/llvm/test/CodeGen/AMDGPU/sdiv.ll
index 5c0f813..441509b 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv.ll
@@ -391,156 +391,144 @@ define amdgpu_kernel void @slow_sdiv_i32_3435(ptr addrspace(1) %out, ptr addrspa
define amdgpu_kernel void @sdiv_v2i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GCN-LABEL: sdiv_v2i32:
; GCN: ; %bb.0:
-; GCN-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
-; GCN-NEXT: s_mov_b32 s3, 0xf000
-; GCN-NEXT: s_mov_b32 s2, -1
-; GCN-NEXT: s_mov_b32 s10, s2
-; GCN-NEXT: s_mov_b32 s11, s3
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GCN-NEXT: s_mov_b32 s7, 0xf000
+; GCN-NEXT: s_mov_b32 s6, -1
+; GCN-NEXT: s_mov_b32 s10, s6
+; GCN-NEXT: s_mov_b32 s11, s7
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_mov_b32 s8, s6
-; GCN-NEXT: s_mov_b32 s9, s7
+; GCN-NEXT: s_mov_b32 s8, s2
+; GCN-NEXT: s_mov_b32 s9, s3
; GCN-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
+; GCN-NEXT: s_mov_b32 s4, s0
+; GCN-NEXT: s_mov_b32 s5, s1
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_readfirstlane_b32 s0, v2
-; GCN-NEXT: s_abs_i32 s1, s0
-; GCN-NEXT: v_cvt_f32_u32_e32 v2, s1
-; GCN-NEXT: s_sub_i32 s6, 0, s1
-; GCN-NEXT: v_readfirstlane_b32 s8, v3
-; GCN-NEXT: v_rcp_iflag_f32_e32 v2, v2
-; GCN-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2
-; GCN-NEXT: v_cvt_u32_f32_e32 v2, v2
-; GCN-NEXT: v_mul_lo_u32 v4, s6, v2
-; GCN-NEXT: v_readfirstlane_b32 s6, v0
-; GCN-NEXT: s_abs_i32 s7, s6
-; GCN-NEXT: s_xor_b32 s0, s6, s0
-; GCN-NEXT: v_mul_hi_u32 v4, v2, v4
-; GCN-NEXT: s_ashr_i32 s6, s0, 31
-; GCN-NEXT: v_add_i32_e32 v0, vcc, v2, v4
-; GCN-NEXT: v_mul_hi_u32 v0, s7, v0
-; GCN-NEXT: v_readfirstlane_b32 s0, v0
-; GCN-NEXT: s_mul_i32 s0, s0, s1
-; GCN-NEXT: s_sub_i32 s0, s7, s0
-; GCN-NEXT: s_sub_i32 s7, s0, s1
-; GCN-NEXT: v_add_i32_e32 v2, vcc, 1, v0
-; GCN-NEXT: s_cmp_ge_u32 s0, s1
-; GCN-NEXT: s_cselect_b64 vcc, -1, 0
-; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-NEXT: s_cselect_b32 s0, s7, s0
-; GCN-NEXT: v_add_i32_e32 v2, vcc, 1, v0
-; GCN-NEXT: s_cmp_ge_u32 s0, s1
-; GCN-NEXT: s_cselect_b64 vcc, -1, 0
-; GCN-NEXT: s_abs_i32 s7, s8
-; GCN-NEXT: v_cvt_f32_u32_e32 v3, s7
-; GCN-NEXT: s_mov_b32 s0, s4
-; GCN-NEXT: s_sub_i32 s4, 0, s7
-; GCN-NEXT: s_mov_b32 s1, s5
-; GCN-NEXT: v_rcp_iflag_f32_e32 v3, v3
-; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-NEXT: v_xor_b32_e32 v0, s6, v0
-; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s6, v0
-; GCN-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
-; GCN-NEXT: v_cvt_u32_f32_e32 v3, v3
-; GCN-NEXT: v_mul_lo_u32 v4, s4, v3
-; GCN-NEXT: v_readfirstlane_b32 s4, v1
-; GCN-NEXT: s_xor_b32 s5, s4, s8
-; GCN-NEXT: s_abs_i32 s4, s4
-; GCN-NEXT: v_mul_hi_u32 v1, v3, v4
-; GCN-NEXT: s_ashr_i32 s5, s5, 31
-; GCN-NEXT: v_add_i32_e32 v1, vcc, v3, v1
-; GCN-NEXT: v_mul_hi_u32 v1, s4, v1
-; GCN-NEXT: v_readfirstlane_b32 s6, v1
-; GCN-NEXT: s_mul_i32 s6, s6, s7
-; GCN-NEXT: s_sub_i32 s4, s4, s6
-; GCN-NEXT: s_sub_i32 s6, s4, s7
-; GCN-NEXT: v_add_i32_e32 v2, vcc, 1, v1
-; GCN-NEXT: s_cmp_ge_u32 s4, s7
-; GCN-NEXT: s_cselect_b64 vcc, -1, 0
-; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
-; GCN-NEXT: s_cselect_b32 s4, s6, s4
-; GCN-NEXT: v_add_i32_e32 v2, vcc, 1, v1
-; GCN-NEXT: s_cmp_ge_u32 s4, s7
-; GCN-NEXT: s_cselect_b64 vcc, -1, 0
-; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
-; GCN-NEXT: v_xor_b32_e32 v1, s5, v1
-; GCN-NEXT: v_subrev_i32_e32 v1, vcc, s5, v1
-; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-NEXT: v_sub_i32_e32 v6, vcc, 0, v2
+; GCN-NEXT: v_sub_i32_e32 v9, vcc, 0, v3
+; GCN-NEXT: v_xor_b32_e32 v4, v0, v2
+; GCN-NEXT: v_xor_b32_e32 v7, v1, v3
+; GCN-NEXT: v_max_i32_e32 v2, v2, v6
+; GCN-NEXT: v_max_i32_e32 v3, v3, v9
+; GCN-NEXT: v_cvt_f32_u32_e32 v6, v2
+; GCN-NEXT: v_cvt_f32_u32_e32 v9, v3
+; GCN-NEXT: v_sub_i32_e32 v5, vcc, 0, v0
+; GCN-NEXT: v_rcp_iflag_f32_e32 v6, v6
+; GCN-NEXT: v_max_i32_e32 v0, v0, v5
+; GCN-NEXT: v_rcp_iflag_f32_e32 v5, v9
+; GCN-NEXT: v_sub_i32_e32 v9, vcc, 0, v2
+; GCN-NEXT: v_mul_f32_e32 v6, 0x4f7ffffe, v6
+; GCN-NEXT: v_mul_f32_e32 v5, 0x4f7ffffe, v5
+; GCN-NEXT: v_cvt_u32_f32_e32 v6, v6
+; GCN-NEXT: v_cvt_u32_f32_e32 v5, v5
+; GCN-NEXT: v_sub_i32_e32 v10, vcc, 0, v3
+; GCN-NEXT: v_mul_lo_u32 v9, v9, v6
+; GCN-NEXT: v_mul_lo_u32 v10, v10, v5
+; GCN-NEXT: v_sub_i32_e32 v8, vcc, 0, v1
+; GCN-NEXT: v_mul_hi_u32 v9, v6, v9
+; GCN-NEXT: v_max_i32_e32 v1, v1, v8
+; GCN-NEXT: v_mul_hi_u32 v8, v5, v10
+; GCN-NEXT: v_ashrrev_i32_e32 v4, 31, v4
+; GCN-NEXT: v_add_i32_e32 v6, vcc, v6, v9
+; GCN-NEXT: v_add_i32_e32 v5, vcc, v5, v8
+; GCN-NEXT: v_mul_hi_u32 v6, v0, v6
+; GCN-NEXT: v_mul_hi_u32 v5, v1, v5
+; GCN-NEXT: v_ashrrev_i32_e32 v7, 31, v7
+; GCN-NEXT: v_mul_lo_u32 v8, v6, v2
+; GCN-NEXT: v_mul_lo_u32 v10, v5, v3
+; GCN-NEXT: v_add_i32_e32 v9, vcc, 1, v6
+; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v8
+; GCN-NEXT: v_sub_i32_e32 v1, vcc, v1, v10
+; GCN-NEXT: v_add_i32_e32 v11, vcc, 1, v5
+; GCN-NEXT: v_cmp_ge_u32_e64 s[0:1], v0, v2
+; GCN-NEXT: v_cmp_ge_u32_e64 s[2:3], v1, v3
+; GCN-NEXT: v_sub_i32_e32 v8, vcc, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v6, v6, v9, s[0:1]
+; GCN-NEXT: v_sub_i32_e32 v9, vcc, v1, v3
+; GCN-NEXT: v_cndmask_b32_e64 v5, v5, v11, s[2:3]
+; GCN-NEXT: v_cndmask_b32_e64 v0, v0, v8, s[0:1]
+; GCN-NEXT: v_add_i32_e32 v8, vcc, 1, v6
+; GCN-NEXT: v_cndmask_b32_e64 v1, v1, v9, s[2:3]
+; GCN-NEXT: v_add_i32_e32 v9, vcc, 1, v5
+; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
+; GCN-NEXT: v_cndmask_b32_e32 v0, v6, v8, vcc
+; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v1, v3
+; GCN-NEXT: v_cndmask_b32_e32 v1, v5, v9, vcc
+; GCN-NEXT: v_xor_b32_e32 v0, v0, v4
+; GCN-NEXT: v_xor_b32_e32 v1, v1, v7
+; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v4
+; GCN-NEXT: v_sub_i32_e32 v1, vcc, v1, v7
+; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GCN-NEXT: s_endpgm
;
; TONGA-LABEL: sdiv_v2i32:
; TONGA: ; %bb.0:
-; TONGA-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24
-; TONGA-NEXT: s_mov_b32 s3, 0xf000
-; TONGA-NEXT: s_mov_b32 s2, -1
-; TONGA-NEXT: s_mov_b32 s10, s2
-; TONGA-NEXT: s_mov_b32 s11, s3
+; TONGA-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; TONGA-NEXT: s_mov_b32 s7, 0xf000
+; TONGA-NEXT: s_mov_b32 s6, -1
+; TONGA-NEXT: s_mov_b32 s10, s6
+; TONGA-NEXT: s_mov_b32 s11, s7
; TONGA-NEXT: s_waitcnt lgkmcnt(0)
-; TONGA-NEXT: s_mov_b32 s8, s6
-; TONGA-NEXT: s_mov_b32 s9, s7
+; TONGA-NEXT: s_mov_b32 s8, s2
+; TONGA-NEXT: s_mov_b32 s9, s3
; TONGA-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
+; TONGA-NEXT: s_mov_b32 s4, s0
+; TONGA-NEXT: s_mov_b32 s5, s1
; TONGA-NEXT: s_waitcnt vmcnt(0)
-; TONGA-NEXT: v_readfirstlane_b32 s0, v2
-; TONGA-NEXT: s_abs_i32 s1, s0
-; TONGA-NEXT: v_cvt_f32_u32_e32 v2, s1
-; TONGA-NEXT: s_sub_i32 s6, 0, s1
-; TONGA-NEXT: v_readfirstlane_b32 s8, v3
-; TONGA-NEXT: v_rcp_iflag_f32_e32 v2, v2
-; TONGA-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2
-; TONGA-NEXT: v_cvt_u32_f32_e32 v2, v2
-; TONGA-NEXT: v_mul_lo_u32 v4, s6, v2
-; TONGA-NEXT: v_readfirstlane_b32 s6, v0
-; TONGA-NEXT: s_abs_i32 s7, s6
-; TONGA-NEXT: s_xor_b32 s0, s6, s0
-; TONGA-NEXT: v_mul_hi_u32 v4, v2, v4
-; TONGA-NEXT: s_ashr_i32 s6, s0, 31
-; TONGA-NEXT: v_add_u32_e32 v0, vcc, v2, v4
-; TONGA-NEXT: v_mul_hi_u32 v0, s7, v0
-; TONGA-NEXT: v_readfirstlane_b32 s0, v0
-; TONGA-NEXT: s_mul_i32 s0, s0, s1
-; TONGA-NEXT: s_sub_i32 s0, s7, s0
-; TONGA-NEXT: s_sub_i32 s7, s0, s1
-; TONGA-NEXT: v_add_u32_e32 v2, vcc, 1, v0
-; TONGA-NEXT: s_cmp_ge_u32 s0, s1
-; TONGA-NEXT: s_cselect_b64 vcc, -1, 0
-; TONGA-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; TONGA-NEXT: s_cselect_b32 s0, s7, s0
-; TONGA-NEXT: v_add_u32_e32 v2, vcc, 1, v0
-; TONGA-NEXT: s_cmp_ge_u32 s0, s1
-; TONGA-NEXT: s_cselect_b64 vcc, -1, 0
-; TONGA-NEXT: s_abs_i32 s7, s8
-; TONGA-NEXT: v_cvt_f32_u32_e32 v3, s7
-; TONGA-NEXT: s_mov_b32 s0, s4
-; TONGA-NEXT: s_sub_i32 s4, 0, s7
-; TONGA-NEXT: s_mov_b32 s1, s5
-; TONGA-NEXT: v_rcp_iflag_f32_e32 v3, v3
-; TONGA-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; TONGA-NEXT: v_xor_b32_e32 v0, s6, v0
-; TONGA-NEXT: v_subrev_u32_e32 v0, vcc, s6, v0
-; TONGA-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
-; TONGA-NEXT: v_cvt_u32_f32_e32 v3, v3
-; TONGA-NEXT: v_mul_lo_u32 v4, s4, v3
-; TONGA-NEXT: v_readfirstlane_b32 s4, v1
-; TONGA-NEXT: s_xor_b32 s5, s4, s8
-; TONGA-NEXT: s_abs_i32 s4, s4
-; TONGA-NEXT: v_mul_hi_u32 v1, v3, v4
-; TONGA-NEXT: s_ashr_i32 s5, s5, 31
-; TONGA-NEXT: v_add_u32_e32 v1, vcc, v3, v1
-; TONGA-NEXT: v_mul_hi_u32 v1, s4, v1
-; TONGA-NEXT: v_readfirstlane_b32 s6, v1
-; TONGA-NEXT: s_mul_i32 s6, s6, s7
-; TONGA-NEXT: s_sub_i32 s4, s4, s6
-; TONGA-NEXT: s_sub_i32 s6, s4, s7
-; TONGA-NEXT: v_add_u32_e32 v2, vcc, 1, v1
-; TONGA-NEXT: s_cmp_ge_u32 s4, s7
-; TONGA-NEXT: s_cselect_b64 vcc, -1, 0
-; TONGA-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
-; TONGA-NEXT: s_cselect_b32 s4, s6, s4
-; TONGA-NEXT: v_add_u32_e32 v2, vcc, 1, v1
-; TONGA-NEXT: s_cmp_ge_u32 s4, s7
-; TONGA-NEXT: s_cselect_b64 vcc, -1, 0
-; TONGA-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
-; TONGA-NEXT: v_xor_b32_e32 v1, s5, v1
-; TONGA-NEXT: v_subrev_u32_e32 v1, vcc, s5, v1
-; TONGA-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; TONGA-NEXT: v_sub_u32_e32 v6, vcc, 0, v2
+; TONGA-NEXT: v_sub_u32_e32 v9, vcc, 0, v3
+; TONGA-NEXT: v_xor_b32_e32 v4, v0, v2
+; TONGA-NEXT: v_xor_b32_e32 v7, v1, v3
+; TONGA-NEXT: v_max_i32_e32 v2, v2, v6
+; TONGA-NEXT: v_max_i32_e32 v3, v3, v9
+; TONGA-NEXT: v_cvt_f32_u32_e32 v6, v2
+; TONGA-NEXT: v_cvt_f32_u32_e32 v9, v3
+; TONGA-NEXT: v_sub_u32_e32 v5, vcc, 0, v0
+; TONGA-NEXT: v_rcp_iflag_f32_e32 v6, v6
+; TONGA-NEXT: v_max_i32_e32 v0, v0, v5
+; TONGA-NEXT: v_rcp_iflag_f32_e32 v5, v9
+; TONGA-NEXT: v_sub_u32_e32 v9, vcc, 0, v2
+; TONGA-NEXT: v_mul_f32_e32 v6, 0x4f7ffffe, v6
+; TONGA-NEXT: v_mul_f32_e32 v5, 0x4f7ffffe, v5
+; TONGA-NEXT: v_cvt_u32_f32_e32 v6, v6
+; TONGA-NEXT: v_cvt_u32_f32_e32 v5, v5
+; TONGA-NEXT: v_sub_u32_e32 v10, vcc, 0, v3
+; TONGA-NEXT: v_mul_lo_u32 v9, v9, v6
+; TONGA-NEXT: v_mul_lo_u32 v10, v10, v5
+; TONGA-NEXT: v_sub_u32_e32 v8, vcc, 0, v1
+; TONGA-NEXT: v_mul_hi_u32 v9, v6, v9
+; TONGA-NEXT: v_max_i32_e32 v1, v1, v8
+; TONGA-NEXT: v_mul_hi_u32 v8, v5, v10
+; TONGA-NEXT: v_ashrrev_i32_e32 v4, 31, v4
+; TONGA-NEXT: v_add_u32_e32 v6, vcc, v6, v9
+; TONGA-NEXT: v_add_u32_e32 v5, vcc, v5, v8
+; TONGA-NEXT: v_mul_hi_u32 v6, v0, v6
+; TONGA-NEXT: v_mul_hi_u32 v5, v1, v5
+; TONGA-NEXT: v_ashrrev_i32_e32 v7, 31, v7
+; TONGA-NEXT: v_mul_lo_u32 v8, v6, v2
+; TONGA-NEXT: v_mul_lo_u32 v10, v5, v3
+; TONGA-NEXT: v_add_u32_e32 v9, vcc, 1, v6
+; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v0, v8
+; TONGA-NEXT: v_sub_u32_e32 v1, vcc, v1, v10
+; TONGA-NEXT: v_add_u32_e32 v11, vcc, 1, v5
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[0:1], v0, v2
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v1, v3
+; TONGA-NEXT: v_sub_u32_e32 v8, vcc, v0, v2
+; TONGA-NEXT: v_cndmask_b32_e64 v6, v6, v9, s[0:1]
+; TONGA-NEXT: v_sub_u32_e32 v9, vcc, v1, v3
+; TONGA-NEXT: v_cndmask_b32_e64 v5, v5, v11, s[2:3]
+; TONGA-NEXT: v_cndmask_b32_e64 v0, v0, v8, s[0:1]
+; TONGA-NEXT: v_add_u32_e32 v8, vcc, 1, v6
+; TONGA-NEXT: v_cndmask_b32_e64 v1, v1, v9, s[2:3]
+; TONGA-NEXT: v_add_u32_e32 v9, vcc, 1, v5
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
+; TONGA-NEXT: v_cndmask_b32_e32 v0, v6, v8, vcc
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v1, v3
+; TONGA-NEXT: v_cndmask_b32_e32 v1, v5, v9, vcc
+; TONGA-NEXT: v_xor_b32_e32 v0, v0, v4
+; TONGA-NEXT: v_xor_b32_e32 v1, v1, v7
+; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v0, v4
+; TONGA-NEXT: v_sub_u32_e32 v1, vcc, v1, v7
+; TONGA-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; TONGA-NEXT: s_endpgm
;
; GFX9-LABEL: sdiv_v2i32:
@@ -558,44 +546,44 @@ define amdgpu_kernel void @sdiv_v2i32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: v_readfirstlane_b32 s0, v2
; GFX9-NEXT: s_abs_i32 s1, s0
; GFX9-NEXT: v_cvt_f32_u32_e32 v2, s1
-; GFX9-NEXT: v_readfirstlane_b32 s4, v0
-; GFX9-NEXT: s_xor_b32 s0, s4, s0
+; GFX9-NEXT: v_readfirstlane_b32 s5, v0
+; GFX9-NEXT: s_xor_b32 s0, s5, s0
; GFX9-NEXT: s_ashr_i32 s6, s0, 31
; GFX9-NEXT: v_rcp_iflag_f32_e32 v2, v2
; GFX9-NEXT: s_sub_i32 s0, 0, s1
-; GFX9-NEXT: s_abs_i32 s4, s4
-; GFX9-NEXT: v_readfirstlane_b32 s5, v3
+; GFX9-NEXT: s_abs_i32 s5, s5
+; GFX9-NEXT: v_readfirstlane_b32 s4, v3
; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v2
; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT: v_readfirstlane_b32 s7, v0
; GFX9-NEXT: s_mul_i32 s0, s0, s7
; GFX9-NEXT: s_mul_hi_u32 s0, s7, s0
; GFX9-NEXT: s_add_i32 s7, s7, s0
-; GFX9-NEXT: s_mul_hi_u32 s0, s4, s7
+; GFX9-NEXT: s_mul_hi_u32 s0, s5, s7
; GFX9-NEXT: s_mul_i32 s7, s0, s1
-; GFX9-NEXT: s_sub_i32 s4, s4, s7
+; GFX9-NEXT: s_sub_i32 s5, s5, s7
; GFX9-NEXT: s_add_i32 s10, s0, 1
-; GFX9-NEXT: s_sub_i32 s7, s4, s1
-; GFX9-NEXT: s_cmp_ge_u32 s4, s1
+; GFX9-NEXT: s_sub_i32 s7, s5, s1
+; GFX9-NEXT: s_cmp_ge_u32 s5, s1
; GFX9-NEXT: s_cselect_b32 s0, s10, s0
-; GFX9-NEXT: s_cselect_b32 s4, s7, s4
+; GFX9-NEXT: s_cselect_b32 s5, s7, s5
; GFX9-NEXT: s_add_i32 s7, s0, 1
-; GFX9-NEXT: s_cmp_ge_u32 s4, s1
-; GFX9-NEXT: s_cselect_b32 s4, s7, s0
-; GFX9-NEXT: s_abs_i32 s7, s5
+; GFX9-NEXT: s_cmp_ge_u32 s5, s1
+; GFX9-NEXT: s_cselect_b32 s5, s7, s0
+; GFX9-NEXT: s_abs_i32 s7, s4
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s7
-; GFX9-NEXT: s_xor_b32 s4, s4, s6
+; GFX9-NEXT: s_xor_b32 s5, s5, s6
; GFX9-NEXT: s_mov_b32 s1, s9
; GFX9-NEXT: s_sub_i32 s9, 0, s7
; GFX9-NEXT: v_rcp_iflag_f32_e32 v0, v0
-; GFX9-NEXT: s_sub_i32 s4, s4, s6
+; GFX9-NEXT: s_sub_i32 s5, s5, s6
; GFX9-NEXT: s_mov_b32 s0, s8
; GFX9-NEXT: v_readfirstlane_b32 s8, v1
; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GFX9-NEXT: s_xor_b32 s5, s8, s5
+; GFX9-NEXT: s_xor_b32 s4, s8, s4
; GFX9-NEXT: s_abs_i32 s8, s8
-; GFX9-NEXT: s_ashr_i32 s5, s5, 31
+; GFX9-NEXT: s_ashr_i32 s4, s4, 31
; GFX9-NEXT: v_readfirstlane_b32 s6, v0
; GFX9-NEXT: s_mul_i32 s9, s9, s6
; GFX9-NEXT: s_mul_hi_u32 s9, s6, s9
@@ -611,10 +599,10 @@ define amdgpu_kernel void @sdiv_v2i32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: s_add_i32 s9, s6, 1
; GFX9-NEXT: s_cmp_ge_u32 s8, s7
; GFX9-NEXT: s_cselect_b32 s6, s9, s6
-; GFX9-NEXT: s_xor_b32 s6, s6, s5
-; GFX9-NEXT: s_sub_i32 s5, s6, s5
-; GFX9-NEXT: v_mov_b32_e32 v0, s4
-; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: s_xor_b32 s6, s6, s4
+; GFX9-NEXT: s_sub_i32 s4, s6, s4
+; GFX9-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX9-NEXT: s_endpgm
;
@@ -804,255 +792,255 @@ define amdgpu_kernel void @sdiv_v4i32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-LABEL: sdiv_v4i32:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT: s_mov_b32 s11, 0xf000
-; GCN-NEXT: s_mov_b32 s10, -1
-; GCN-NEXT: s_mov_b32 s6, s10
-; GCN-NEXT: s_mov_b32 s7, s11
+; GCN-NEXT: s_mov_b32 s7, 0xf000
+; GCN-NEXT: s_mov_b32 s6, -1
+; GCN-NEXT: s_mov_b32 s10, s6
+; GCN-NEXT: s_mov_b32 s11, s7
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_mov_b32 s4, s2
-; GCN-NEXT: s_mov_b32 s5, s3
-; GCN-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0 offset:16
-; GCN-NEXT: buffer_load_dwordx4 v[4:7], off, s[4:7], 0
-; GCN-NEXT: s_mov_b32 s8, s0
-; GCN-NEXT: s_mov_b32 s9, s1
+; GCN-NEXT: s_mov_b32 s8, s2
+; GCN-NEXT: s_mov_b32 s9, s3
+; GCN-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
+; GCN-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:16
+; GCN-NEXT: s_mov_b32 s4, s0
+; GCN-NEXT: s_mov_b32 s5, s1
; GCN-NEXT: s_waitcnt vmcnt(1)
-; GCN-NEXT: v_readfirstlane_b32 s0, v0
-; GCN-NEXT: v_readfirstlane_b32 s1, v1
-; GCN-NEXT: v_readfirstlane_b32 s2, v2
-; GCN-NEXT: s_abs_i32 s13, s0
-; GCN-NEXT: s_abs_i32 s14, s1
-; GCN-NEXT: s_abs_i32 s15, s2
-; GCN-NEXT: v_cvt_f32_u32_e32 v0, s13
-; GCN-NEXT: v_cvt_f32_u32_e32 v1, s14
-; GCN-NEXT: v_cvt_f32_u32_e32 v2, s15
-; GCN-NEXT: v_readfirstlane_b32 s6, v3
+; GCN-NEXT: v_sub_i32_e32 v9, vcc, 0, v0
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_sub_i32_e32 v10, vcc, 0, v4
+; GCN-NEXT: v_xor_b32_e32 v8, v0, v4
+; GCN-NEXT: v_max_i32_e32 v4, v4, v10
+; GCN-NEXT: v_cvt_f32_u32_e32 v10, v4
+; GCN-NEXT: v_sub_i32_e32 v13, vcc, 0, v5
+; GCN-NEXT: v_xor_b32_e32 v11, v1, v5
+; GCN-NEXT: v_rcp_iflag_f32_e32 v10, v10
+; GCN-NEXT: v_max_i32_e32 v5, v5, v13
+; GCN-NEXT: v_cvt_f32_u32_e32 v13, v5
+; GCN-NEXT: v_sub_i32_e32 v16, vcc, 0, v4
+; GCN-NEXT: v_mul_f32_e32 v10, 0x4f7ffffe, v10
+; GCN-NEXT: v_cvt_u32_f32_e32 v10, v10
+; GCN-NEXT: v_rcp_iflag_f32_e32 v13, v13
+; GCN-NEXT: v_sub_i32_e32 v12, vcc, 0, v1
+; GCN-NEXT: v_mul_lo_u32 v16, v16, v10
+; GCN-NEXT: v_mul_f32_e32 v13, 0x4f7ffffe, v13
+; GCN-NEXT: v_cvt_u32_f32_e32 v13, v13
+; GCN-NEXT: v_max_i32_e32 v0, v0, v9
+; GCN-NEXT: v_mul_hi_u32 v16, v10, v16
+; GCN-NEXT: v_max_i32_e32 v1, v1, v12
+; GCN-NEXT: v_sub_i32_e32 v15, vcc, 0, v6
+; GCN-NEXT: v_add_i32_e32 v10, vcc, v10, v16
+; GCN-NEXT: v_sub_i32_e32 v16, vcc, 0, v5
+; GCN-NEXT: v_mul_lo_u32 v16, v16, v13
+; GCN-NEXT: v_mul_hi_u32 v10, v0, v10
+; GCN-NEXT: v_xor_b32_e32 v14, v2, v6
+; GCN-NEXT: v_max_i32_e32 v6, v6, v15
+; GCN-NEXT: v_mul_hi_u32 v12, v13, v16
+; GCN-NEXT: v_cvt_f32_u32_e32 v15, v6
+; GCN-NEXT: v_ashrrev_i32_e32 v8, 31, v8
+; GCN-NEXT: v_ashrrev_i32_e32 v11, 31, v11
+; GCN-NEXT: v_add_i32_e32 v12, vcc, v13, v12
+; GCN-NEXT: v_mul_lo_u32 v13, v10, v4
+; GCN-NEXT: v_mul_hi_u32 v12, v1, v12
+; GCN-NEXT: v_rcp_iflag_f32_e32 v9, v15
+; GCN-NEXT: v_ashrrev_i32_e32 v14, 31, v14
+; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v13
+; GCN-NEXT: v_add_i32_e32 v13, vcc, 1, v10
+; GCN-NEXT: v_cmp_ge_u32_e64 s[0:1], v0, v4
+; GCN-NEXT: v_cndmask_b32_e64 v10, v10, v13, s[0:1]
+; GCN-NEXT: v_sub_i32_e32 v13, vcc, v0, v4
+; GCN-NEXT: v_cndmask_b32_e64 v0, v0, v13, s[0:1]
+; GCN-NEXT: v_cmp_ge_u32_e64 s[0:1], v0, v4
+; GCN-NEXT: v_mul_lo_u32 v0, v12, v5
+; GCN-NEXT: v_mul_f32_e32 v9, 0x4f7ffffe, v9
+; GCN-NEXT: v_cvt_u32_f32_e32 v9, v9
+; GCN-NEXT: v_sub_i32_e32 v4, vcc, 0, v6
+; GCN-NEXT: v_sub_i32_e32 v0, vcc, v1, v0
+; GCN-NEXT: v_add_i32_e32 v1, vcc, 1, v12
+; GCN-NEXT: v_cmp_ge_u32_e64 s[2:3], v0, v5
+; GCN-NEXT: v_cndmask_b32_e64 v1, v12, v1, s[2:3]
+; GCN-NEXT: v_sub_i32_e32 v12, vcc, v0, v5
+; GCN-NEXT: v_mul_lo_u32 v4, v4, v9
+; GCN-NEXT: v_cndmask_b32_e64 v0, v0, v12, s[2:3]
+; GCN-NEXT: v_cmp_ge_u32_e64 s[2:3], v0, v5
+; GCN-NEXT: v_sub_i32_e32 v0, vcc, 0, v7
+; GCN-NEXT: v_max_i32_e32 v5, v7, v0
+; GCN-NEXT: v_cvt_f32_u32_e32 v0, v5
+; GCN-NEXT: v_mul_hi_u32 v4, v9, v4
+; GCN-NEXT: v_add_i32_e32 v13, vcc, 1, v10
; GCN-NEXT: v_rcp_iflag_f32_e32 v0, v0
-; GCN-NEXT: v_rcp_iflag_f32_e32 v1, v1
-; GCN-NEXT: v_rcp_iflag_f32_e32 v2, v2
-; GCN-NEXT: s_abs_i32 s17, s6
-; GCN-NEXT: v_cvt_f32_u32_e32 v3, s17
+; GCN-NEXT: v_add_i32_e32 v4, vcc, v9, v4
+; GCN-NEXT: v_sub_i32_e32 v9, vcc, 0, v2
+; GCN-NEXT: v_max_i32_e32 v2, v2, v9
+; GCN-NEXT: v_mul_hi_u32 v4, v2, v4
; GCN-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
-; GCN-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
-; GCN-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2
-; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1
-; GCN-NEXT: v_cvt_u32_f32_e32 v2, v2
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_readfirstlane_b32 s3, v4
-; GCN-NEXT: v_readfirstlane_b32 s4, v5
-; GCN-NEXT: v_readfirstlane_b32 s5, v6
-; GCN-NEXT: s_xor_b32 s12, s3, s0
-; GCN-NEXT: s_xor_b32 s0, s4, s1
-; GCN-NEXT: s_xor_b32 s1, s5, s2
-; GCN-NEXT: s_sub_i32 s2, 0, s13
-; GCN-NEXT: s_ashr_i32 s18, s0, 31
-; GCN-NEXT: s_sub_i32 s0, 0, s14
-; GCN-NEXT: s_ashr_i32 s19, s1, 31
-; GCN-NEXT: s_sub_i32 s1, 0, s15
-; GCN-NEXT: v_rcp_iflag_f32_e32 v3, v3
-; GCN-NEXT: v_mul_lo_u32 v4, s2, v0
-; GCN-NEXT: v_mul_lo_u32 v5, s0, v1
-; GCN-NEXT: v_mul_lo_u32 v6, s1, v2
-; GCN-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
-; GCN-NEXT: v_cvt_u32_f32_e32 v3, v3
-; GCN-NEXT: v_mul_hi_u32 v4, v0, v4
-; GCN-NEXT: v_mul_hi_u32 v5, v1, v5
-; GCN-NEXT: v_mul_hi_u32 v6, v2, v6
-; GCN-NEXT: s_sub_i32 s20, 0, s17
-; GCN-NEXT: v_readfirstlane_b32 s7, v7
-; GCN-NEXT: s_abs_i32 s3, s3
-; GCN-NEXT: s_abs_i32 s4, s4
-; GCN-NEXT: s_abs_i32 s5, s5
-; GCN-NEXT: v_mul_lo_u32 v7, s20, v3
-; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v4
-; GCN-NEXT: v_add_i32_e32 v1, vcc, v1, v5
-; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v6
-; GCN-NEXT: v_mul_hi_u32 v0, s3, v0
-; GCN-NEXT: v_mul_hi_u32 v1, s4, v1
-; GCN-NEXT: v_mul_hi_u32 v2, s5, v2
-; GCN-NEXT: v_mul_hi_u32 v7, v3, v7
-; GCN-NEXT: v_mul_lo_u32 v4, v0, s13
-; GCN-NEXT: v_mul_lo_u32 v6, v1, s14
-; GCN-NEXT: v_mul_lo_u32 v8, v2, s15
-; GCN-NEXT: s_abs_i32 s16, s7
-; GCN-NEXT: v_add_i32_e32 v3, vcc, v3, v7
-; GCN-NEXT: v_mul_hi_u32 v3, s16, v3
-; GCN-NEXT: v_sub_i32_e32 v4, vcc, s3, v4
-; GCN-NEXT: v_sub_i32_e32 v6, vcc, s4, v6
-; GCN-NEXT: v_sub_i32_e32 v8, vcc, s5, v8
-; GCN-NEXT: v_add_i32_e32 v5, vcc, 1, v0
-; GCN-NEXT: v_add_i32_e32 v7, vcc, 1, v1
-; GCN-NEXT: v_add_i32_e32 v9, vcc, 1, v2
-; GCN-NEXT: v_cmp_le_u32_e64 s[0:1], s13, v4
-; GCN-NEXT: v_cmp_le_u32_e64 s[2:3], s14, v6
-; GCN-NEXT: v_cmp_le_u32_e64 s[4:5], s15, v8
-; GCN-NEXT: v_subrev_i32_e32 v10, vcc, s13, v4
-; GCN-NEXT: v_cndmask_b32_e64 v0, v0, v5, s[0:1]
-; GCN-NEXT: v_subrev_i32_e32 v5, vcc, s14, v6
-; GCN-NEXT: v_cndmask_b32_e64 v1, v1, v7, s[2:3]
-; GCN-NEXT: v_subrev_i32_e32 v7, vcc, s15, v8
-; GCN-NEXT: v_cndmask_b32_e64 v2, v2, v9, s[4:5]
-; GCN-NEXT: v_cndmask_b32_e64 v4, v4, v10, s[0:1]
-; GCN-NEXT: v_add_i32_e32 v9, vcc, 1, v0
-; GCN-NEXT: v_cndmask_b32_e64 v5, v6, v5, s[2:3]
-; GCN-NEXT: v_add_i32_e32 v6, vcc, 1, v1
-; GCN-NEXT: v_cndmask_b32_e64 v7, v8, v7, s[4:5]
-; GCN-NEXT: v_add_i32_e32 v8, vcc, 1, v2
-; GCN-NEXT: v_cmp_le_u32_e32 vcc, s13, v4
-; GCN-NEXT: v_mul_lo_u32 v4, v3, s17
-; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v9, vcc
-; GCN-NEXT: v_cmp_le_u32_e32 vcc, s14, v5
-; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc
-; GCN-NEXT: v_cmp_le_u32_e32 vcc, s15, v7
-; GCN-NEXT: s_ashr_i32 s12, s12, 31
-; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc
-; GCN-NEXT: v_xor_b32_e32 v0, s12, v0
-; GCN-NEXT: v_xor_b32_e32 v1, s18, v1
-; GCN-NEXT: v_xor_b32_e32 v2, s19, v2
-; GCN-NEXT: v_sub_i32_e32 v4, vcc, s16, v4
-; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s12, v0
-; GCN-NEXT: v_subrev_i32_e32 v1, vcc, s18, v1
-; GCN-NEXT: v_subrev_i32_e32 v2, vcc, s19, v2
-; GCN-NEXT: v_add_i32_e32 v5, vcc, 1, v3
-; GCN-NEXT: v_subrev_i32_e32 v6, vcc, s17, v4
-; GCN-NEXT: v_cmp_le_u32_e32 vcc, s17, v4
-; GCN-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
-; GCN-NEXT: v_add_i32_e32 v5, vcc, 1, v3
-; GCN-NEXT: s_xor_b32 s0, s7, s6
-; GCN-NEXT: v_cmp_le_u32_e32 vcc, s17, v4
-; GCN-NEXT: s_ashr_i32 s0, s0, 31
-; GCN-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
-; GCN-NEXT: v_xor_b32_e32 v3, s0, v3
-; GCN-NEXT: v_subrev_i32_e32 v3, vcc, s0, v3
-; GCN-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0
+; GCN-NEXT: v_cvt_u32_f32_e32 v9, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, v10, v13, s[0:1]
+; GCN-NEXT: v_xor_b32_e32 v0, v0, v8
+; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v8
+; GCN-NEXT: v_mul_lo_u32 v8, v4, v6
+; GCN-NEXT: v_add_i32_e32 v12, vcc, 1, v1
+; GCN-NEXT: v_sub_i32_e32 v10, vcc, 0, v5
+; GCN-NEXT: v_sub_i32_e32 v2, vcc, v2, v8
+; GCN-NEXT: v_cndmask_b32_e64 v1, v1, v12, s[2:3]
+; GCN-NEXT: v_mul_lo_u32 v10, v10, v9
+; GCN-NEXT: v_add_i32_e32 v8, vcc, 1, v4
+; GCN-NEXT: v_cmp_ge_u32_e64 s[0:1], v2, v6
+; GCN-NEXT: v_xor_b32_e32 v1, v1, v11
+; GCN-NEXT: v_cndmask_b32_e64 v4, v4, v8, s[0:1]
+; GCN-NEXT: v_sub_i32_e32 v8, vcc, v2, v6
+; GCN-NEXT: v_sub_i32_e32 v1, vcc, v1, v11
+; GCN-NEXT: v_cndmask_b32_e64 v2, v2, v8, s[0:1]
+; GCN-NEXT: v_add_i32_e32 v8, vcc, 1, v4
+; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v2, v6
+; GCN-NEXT: v_cndmask_b32_e32 v2, v4, v8, vcc
+; GCN-NEXT: v_mul_hi_u32 v4, v9, v10
+; GCN-NEXT: v_sub_i32_e32 v6, vcc, 0, v3
+; GCN-NEXT: v_max_i32_e32 v6, v3, v6
+; GCN-NEXT: v_add_i32_e32 v4, vcc, v9, v4
+; GCN-NEXT: v_mul_hi_u32 v4, v6, v4
+; GCN-NEXT: v_xor_b32_e32 v2, v2, v14
+; GCN-NEXT: v_sub_i32_e32 v2, vcc, v2, v14
+; GCN-NEXT: v_mul_lo_u32 v8, v4, v5
+; GCN-NEXT: v_xor_b32_e32 v3, v3, v7
+; GCN-NEXT: v_add_i32_e32 v7, vcc, 1, v4
+; GCN-NEXT: v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-NEXT: v_sub_i32_e32 v8, vcc, v6, v5
+; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v6, v5
+; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc
+; GCN-NEXT: v_add_i32_e32 v7, vcc, 1, v4
+; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v6, v5
+; GCN-NEXT: v_ashrrev_i32_e32 v3, 31, v3
+; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc
+; GCN-NEXT: v_xor_b32_e32 v4, v4, v3
+; GCN-NEXT: v_sub_i32_e32 v3, vcc, v4, v3
+; GCN-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
; GCN-NEXT: s_endpgm
;
; TONGA-LABEL: sdiv_v4i32:
; TONGA: ; %bb.0:
; TONGA-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; TONGA-NEXT: s_mov_b32 s11, 0xf000
-; TONGA-NEXT: s_mov_b32 s10, -1
-; TONGA-NEXT: s_mov_b32 s6, s10
-; TONGA-NEXT: s_mov_b32 s7, s11
+; TONGA-NEXT: s_mov_b32 s7, 0xf000
+; TONGA-NEXT: s_mov_b32 s6, -1
+; TONGA-NEXT: s_mov_b32 s10, s6
+; TONGA-NEXT: s_mov_b32 s11, s7
; TONGA-NEXT: s_waitcnt lgkmcnt(0)
-; TONGA-NEXT: s_mov_b32 s4, s2
-; TONGA-NEXT: s_mov_b32 s5, s3
-; TONGA-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0 offset:16
-; TONGA-NEXT: buffer_load_dwordx4 v[4:7], off, s[4:7], 0
-; TONGA-NEXT: s_mov_b32 s8, s0
-; TONGA-NEXT: s_mov_b32 s9, s1
+; TONGA-NEXT: s_mov_b32 s8, s2
+; TONGA-NEXT: s_mov_b32 s9, s3
+; TONGA-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
+; TONGA-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:16
+; TONGA-NEXT: s_mov_b32 s4, s0
+; TONGA-NEXT: s_mov_b32 s5, s1
; TONGA-NEXT: s_waitcnt vmcnt(1)
-; TONGA-NEXT: v_readfirstlane_b32 s0, v0
-; TONGA-NEXT: v_readfirstlane_b32 s1, v1
-; TONGA-NEXT: v_readfirstlane_b32 s2, v2
-; TONGA-NEXT: s_abs_i32 s13, s0
-; TONGA-NEXT: s_abs_i32 s14, s1
-; TONGA-NEXT: s_abs_i32 s15, s2
-; TONGA-NEXT: v_cvt_f32_u32_e32 v0, s13
-; TONGA-NEXT: v_cvt_f32_u32_e32 v1, s14
-; TONGA-NEXT: v_cvt_f32_u32_e32 v2, s15
-; TONGA-NEXT: v_readfirstlane_b32 s6, v3
+; TONGA-NEXT: v_sub_u32_e32 v9, vcc, 0, v0
+; TONGA-NEXT: s_waitcnt vmcnt(0)
+; TONGA-NEXT: v_sub_u32_e32 v10, vcc, 0, v4
+; TONGA-NEXT: v_xor_b32_e32 v8, v0, v4
+; TONGA-NEXT: v_max_i32_e32 v4, v4, v10
+; TONGA-NEXT: v_cvt_f32_u32_e32 v10, v4
+; TONGA-NEXT: v_sub_u32_e32 v13, vcc, 0, v5
+; TONGA-NEXT: v_xor_b32_e32 v11, v1, v5
+; TONGA-NEXT: v_rcp_iflag_f32_e32 v10, v10
+; TONGA-NEXT: v_max_i32_e32 v5, v5, v13
+; TONGA-NEXT: v_cvt_f32_u32_e32 v13, v5
+; TONGA-NEXT: v_sub_u32_e32 v16, vcc, 0, v4
+; TONGA-NEXT: v_mul_f32_e32 v10, 0x4f7ffffe, v10
+; TONGA-NEXT: v_cvt_u32_f32_e32 v10, v10
+; TONGA-NEXT: v_rcp_iflag_f32_e32 v13, v13
+; TONGA-NEXT: v_sub_u32_e32 v12, vcc, 0, v1
+; TONGA-NEXT: v_mul_lo_u32 v16, v16, v10
+; TONGA-NEXT: v_mul_f32_e32 v13, 0x4f7ffffe, v13
+; TONGA-NEXT: v_cvt_u32_f32_e32 v13, v13
+; TONGA-NEXT: v_max_i32_e32 v0, v0, v9
+; TONGA-NEXT: v_mul_hi_u32 v16, v10, v16
+; TONGA-NEXT: v_max_i32_e32 v1, v1, v12
+; TONGA-NEXT: v_sub_u32_e32 v15, vcc, 0, v6
+; TONGA-NEXT: v_add_u32_e32 v10, vcc, v10, v16
+; TONGA-NEXT: v_sub_u32_e32 v16, vcc, 0, v5
+; TONGA-NEXT: v_mul_lo_u32 v16, v16, v13
+; TONGA-NEXT: v_mul_hi_u32 v10, v0, v10
+; TONGA-NEXT: v_xor_b32_e32 v14, v2, v6
+; TONGA-NEXT: v_max_i32_e32 v6, v6, v15
+; TONGA-NEXT: v_mul_hi_u32 v12, v13, v16
+; TONGA-NEXT: v_cvt_f32_u32_e32 v15, v6
+; TONGA-NEXT: v_ashrrev_i32_e32 v8, 31, v8
+; TONGA-NEXT: v_ashrrev_i32_e32 v11, 31, v11
+; TONGA-NEXT: v_add_u32_e32 v12, vcc, v13, v12
+; TONGA-NEXT: v_mul_lo_u32 v13, v10, v4
+; TONGA-NEXT: v_mul_hi_u32 v12, v1, v12
+; TONGA-NEXT: v_rcp_iflag_f32_e32 v9, v15
+; TONGA-NEXT: v_ashrrev_i32_e32 v14, 31, v14
+; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v0, v13
+; TONGA-NEXT: v_add_u32_e32 v13, vcc, 1, v10
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[0:1], v0, v4
+; TONGA-NEXT: v_cndmask_b32_e64 v10, v10, v13, s[0:1]
+; TONGA-NEXT: v_sub_u32_e32 v13, vcc, v0, v4
+; TONGA-NEXT: v_cndmask_b32_e64 v0, v0, v13, s[0:1]
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[0:1], v0, v4
+; TONGA-NEXT: v_mul_lo_u32 v0, v12, v5
+; TONGA-NEXT: v_mul_f32_e32 v9, 0x4f7ffffe, v9
+; TONGA-NEXT: v_cvt_u32_f32_e32 v9, v9
+; TONGA-NEXT: v_sub_u32_e32 v4, vcc, 0, v6
+; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v1, v0
+; TONGA-NEXT: v_add_u32_e32 v1, vcc, 1, v12
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v0, v5
+; TONGA-NEXT: v_cndmask_b32_e64 v1, v12, v1, s[2:3]
+; TONGA-NEXT: v_sub_u32_e32 v12, vcc, v0, v5
+; TONGA-NEXT: v_mul_lo_u32 v4, v4, v9
+; TONGA-NEXT: v_cndmask_b32_e64 v0, v0, v12, s[2:3]
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v0, v5
+; TONGA-NEXT: v_sub_u32_e32 v0, vcc, 0, v7
+; TONGA-NEXT: v_max_i32_e32 v5, v7, v0
+; TONGA-NEXT: v_cvt_f32_u32_e32 v0, v5
+; TONGA-NEXT: v_mul_hi_u32 v4, v9, v4
+; TONGA-NEXT: v_add_u32_e32 v13, vcc, 1, v10
; TONGA-NEXT: v_rcp_iflag_f32_e32 v0, v0
-; TONGA-NEXT: v_rcp_iflag_f32_e32 v1, v1
-; TONGA-NEXT: v_rcp_iflag_f32_e32 v2, v2
-; TONGA-NEXT: s_abs_i32 s17, s6
-; TONGA-NEXT: v_cvt_f32_u32_e32 v3, s17
+; TONGA-NEXT: v_add_u32_e32 v4, vcc, v9, v4
+; TONGA-NEXT: v_sub_u32_e32 v9, vcc, 0, v2
+; TONGA-NEXT: v_max_i32_e32 v2, v2, v9
+; TONGA-NEXT: v_mul_hi_u32 v4, v2, v4
; TONGA-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
-; TONGA-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
-; TONGA-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2
-; TONGA-NEXT: v_cvt_u32_f32_e32 v0, v0
-; TONGA-NEXT: v_cvt_u32_f32_e32 v1, v1
-; TONGA-NEXT: v_cvt_u32_f32_e32 v2, v2
-; TONGA-NEXT: s_waitcnt vmcnt(0)
-; TONGA-NEXT: v_readfirstlane_b32 s3, v4
-; TONGA-NEXT: v_readfirstlane_b32 s4, v5
-; TONGA-NEXT: v_readfirstlane_b32 s5, v6
-; TONGA-NEXT: s_xor_b32 s12, s3, s0
-; TONGA-NEXT: s_xor_b32 s0, s4, s1
-; TONGA-NEXT: s_xor_b32 s1, s5, s2
-; TONGA-NEXT: s_sub_i32 s2, 0, s13
-; TONGA-NEXT: s_ashr_i32 s18, s0, 31
-; TONGA-NEXT: s_sub_i32 s0, 0, s14
-; TONGA-NEXT: s_ashr_i32 s19, s1, 31
-; TONGA-NEXT: s_sub_i32 s1, 0, s15
-; TONGA-NEXT: v_rcp_iflag_f32_e32 v3, v3
-; TONGA-NEXT: v_mul_lo_u32 v4, s2, v0
-; TONGA-NEXT: v_mul_lo_u32 v5, s0, v1
-; TONGA-NEXT: v_mul_lo_u32 v6, s1, v2
-; TONGA-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
-; TONGA-NEXT: v_cvt_u32_f32_e32 v3, v3
-; TONGA-NEXT: v_mul_hi_u32 v4, v0, v4
-; TONGA-NEXT: v_mul_hi_u32 v5, v1, v5
-; TONGA-NEXT: v_mul_hi_u32 v6, v2, v6
-; TONGA-NEXT: s_sub_i32 s20, 0, s17
-; TONGA-NEXT: v_readfirstlane_b32 s7, v7
-; TONGA-NEXT: s_abs_i32 s3, s3
-; TONGA-NEXT: s_abs_i32 s4, s4
-; TONGA-NEXT: s_abs_i32 s5, s5
-; TONGA-NEXT: v_mul_lo_u32 v7, s20, v3
-; TONGA-NEXT: v_add_u32_e32 v0, vcc, v0, v4
-; TONGA-NEXT: v_add_u32_e32 v1, vcc, v1, v5
-; TONGA-NEXT: v_add_u32_e32 v2, vcc, v2, v6
-; TONGA-NEXT: v_mul_hi_u32 v0, s3, v0
-; TONGA-NEXT: v_mul_hi_u32 v1, s4, v1
-; TONGA-NEXT: v_mul_hi_u32 v2, s5, v2
-; TONGA-NEXT: v_mul_hi_u32 v7, v3, v7
-; TONGA-NEXT: v_mul_lo_u32 v4, v0, s13
-; TONGA-NEXT: v_mul_lo_u32 v6, v1, s14
-; TONGA-NEXT: v_mul_lo_u32 v8, v2, s15
-; TONGA-NEXT: s_abs_i32 s16, s7
-; TONGA-NEXT: v_add_u32_e32 v3, vcc, v3, v7
-; TONGA-NEXT: v_mul_hi_u32 v3, s16, v3
-; TONGA-NEXT: v_sub_u32_e32 v4, vcc, s3, v4
-; TONGA-NEXT: v_sub_u32_e32 v6, vcc, s4, v6
-; TONGA-NEXT: v_sub_u32_e32 v8, vcc, s5, v8
-; TONGA-NEXT: v_add_u32_e32 v5, vcc, 1, v0
-; TONGA-NEXT: v_add_u32_e32 v7, vcc, 1, v1
-; TONGA-NEXT: v_add_u32_e32 v9, vcc, 1, v2
-; TONGA-NEXT: v_cmp_le_u32_e64 s[0:1], s13, v4
-; TONGA-NEXT: v_cmp_le_u32_e64 s[2:3], s14, v6
-; TONGA-NEXT: v_cmp_le_u32_e64 s[4:5], s15, v8
-; TONGA-NEXT: v_subrev_u32_e32 v10, vcc, s13, v4
-; TONGA-NEXT: v_cndmask_b32_e64 v0, v0, v5, s[0:1]
-; TONGA-NEXT: v_subrev_u32_e32 v5, vcc, s14, v6
-; TONGA-NEXT: v_cndmask_b32_e64 v1, v1, v7, s[2:3]
-; TONGA-NEXT: v_subrev_u32_e32 v7, vcc, s15, v8
-; TONGA-NEXT: v_cndmask_b32_e64 v2, v2, v9, s[4:5]
-; TONGA-NEXT: v_cndmask_b32_e64 v4, v4, v10, s[0:1]
-; TONGA-NEXT: v_add_u32_e32 v9, vcc, 1, v0
-; TONGA-NEXT: v_cndmask_b32_e64 v5, v6, v5, s[2:3]
-; TONGA-NEXT: v_add_u32_e32 v6, vcc, 1, v1
-; TONGA-NEXT: v_cndmask_b32_e64 v7, v8, v7, s[4:5]
-; TONGA-NEXT: v_add_u32_e32 v8, vcc, 1, v2
-; TONGA-NEXT: v_cmp_le_u32_e32 vcc, s13, v4
-; TONGA-NEXT: v_mul_lo_u32 v4, v3, s17
-; TONGA-NEXT: v_cndmask_b32_e32 v0, v0, v9, vcc
-; TONGA-NEXT: v_cmp_le_u32_e32 vcc, s14, v5
-; TONGA-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc
-; TONGA-NEXT: v_cmp_le_u32_e32 vcc, s15, v7
-; TONGA-NEXT: s_ashr_i32 s12, s12, 31
-; TONGA-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc
-; TONGA-NEXT: v_xor_b32_e32 v0, s12, v0
-; TONGA-NEXT: v_xor_b32_e32 v1, s18, v1
-; TONGA-NEXT: v_xor_b32_e32 v2, s19, v2
-; TONGA-NEXT: v_sub_u32_e32 v4, vcc, s16, v4
-; TONGA-NEXT: v_subrev_u32_e32 v0, vcc, s12, v0
-; TONGA-NEXT: v_subrev_u32_e32 v1, vcc, s18, v1
-; TONGA-NEXT: v_subrev_u32_e32 v2, vcc, s19, v2
-; TONGA-NEXT: v_add_u32_e32 v5, vcc, 1, v3
-; TONGA-NEXT: v_subrev_u32_e32 v6, vcc, s17, v4
-; TONGA-NEXT: v_cmp_le_u32_e32 vcc, s17, v4
-; TONGA-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
-; TONGA-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
-; TONGA-NEXT: v_add_u32_e32 v5, vcc, 1, v3
-; TONGA-NEXT: s_xor_b32 s0, s7, s6
-; TONGA-NEXT: v_cmp_le_u32_e32 vcc, s17, v4
-; TONGA-NEXT: s_ashr_i32 s0, s0, 31
-; TONGA-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
-; TONGA-NEXT: v_xor_b32_e32 v3, s0, v3
-; TONGA-NEXT: v_subrev_u32_e32 v3, vcc, s0, v3
-; TONGA-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0
+; TONGA-NEXT: v_cvt_u32_f32_e32 v9, v0
+; TONGA-NEXT: v_cndmask_b32_e64 v0, v10, v13, s[0:1]
+; TONGA-NEXT: v_xor_b32_e32 v0, v0, v8
+; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v0, v8
+; TONGA-NEXT: v_mul_lo_u32 v8, v4, v6
+; TONGA-NEXT: v_add_u32_e32 v12, vcc, 1, v1
+; TONGA-NEXT: v_sub_u32_e32 v10, vcc, 0, v5
+; TONGA-NEXT: v_sub_u32_e32 v2, vcc, v2, v8
+; TONGA-NEXT: v_cndmask_b32_e64 v1, v1, v12, s[2:3]
+; TONGA-NEXT: v_mul_lo_u32 v10, v10, v9
+; TONGA-NEXT: v_add_u32_e32 v8, vcc, 1, v4
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[0:1], v2, v6
+; TONGA-NEXT: v_xor_b32_e32 v1, v1, v11
+; TONGA-NEXT: v_cndmask_b32_e64 v4, v4, v8, s[0:1]
+; TONGA-NEXT: v_sub_u32_e32 v8, vcc, v2, v6
+; TONGA-NEXT: v_sub_u32_e32 v1, vcc, v1, v11
+; TONGA-NEXT: v_cndmask_b32_e64 v2, v2, v8, s[0:1]
+; TONGA-NEXT: v_add_u32_e32 v8, vcc, 1, v4
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v2, v6
+; TONGA-NEXT: v_cndmask_b32_e32 v2, v4, v8, vcc
+; TONGA-NEXT: v_mul_hi_u32 v4, v9, v10
+; TONGA-NEXT: v_sub_u32_e32 v6, vcc, 0, v3
+; TONGA-NEXT: v_max_i32_e32 v6, v3, v6
+; TONGA-NEXT: v_add_u32_e32 v4, vcc, v9, v4
+; TONGA-NEXT: v_mul_hi_u32 v4, v6, v4
+; TONGA-NEXT: v_xor_b32_e32 v2, v2, v14
+; TONGA-NEXT: v_sub_u32_e32 v2, vcc, v2, v14
+; TONGA-NEXT: v_mul_lo_u32 v8, v4, v5
+; TONGA-NEXT: v_xor_b32_e32 v3, v3, v7
+; TONGA-NEXT: v_add_u32_e32 v7, vcc, 1, v4
+; TONGA-NEXT: v_sub_u32_e32 v6, vcc, v6, v8
+; TONGA-NEXT: v_sub_u32_e32 v8, vcc, v6, v5
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v6, v5
+; TONGA-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc
+; TONGA-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc
+; TONGA-NEXT: v_add_u32_e32 v7, vcc, 1, v4
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v6, v5
+; TONGA-NEXT: v_ashrrev_i32_e32 v3, 31, v3
+; TONGA-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc
+; TONGA-NEXT: v_xor_b32_e32 v4, v4, v3
+; TONGA-NEXT: v_sub_u32_e32 v3, vcc, v4, v3
+; TONGA-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
; TONGA-NEXT: s_endpgm
;
; GFX9-LABEL: sdiv_v4i32:
@@ -2006,7 +1994,7 @@ define amdgpu_kernel void @v_sdiv_i25(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_mul_lo_u32 v1, v3, v2
; GCN-NEXT: v_add_i32_e32 v4, vcc, 1, v3
; GCN-NEXT: v_sub_i32_e32 v1, vcc, v5, v1
-; GCN-NEXT: v_subrev_i32_e32 v5, vcc, v2, v1
+; GCN-NEXT: v_sub_i32_e32 v5, vcc, v1, v2
; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v1, v2
; GCN-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
@@ -2014,7 +2002,7 @@ define amdgpu_kernel void @v_sdiv_i25(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v1, v2
; GCN-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; GCN-NEXT: v_xor_b32_e32 v1, v1, v0
-; GCN-NEXT: v_subrev_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT: v_sub_i32_e32 v0, vcc, v1, v0
; GCN-NEXT: v_bfe_i32 v0, v0, 0, 25
; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT: s_endpgm
@@ -2053,7 +2041,7 @@ define amdgpu_kernel void @v_sdiv_i25(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_mul_lo_u32 v1, v3, v2
; TONGA-NEXT: v_add_u32_e32 v4, vcc, 1, v3
; TONGA-NEXT: v_sub_u32_e32 v1, vcc, v5, v1
-; TONGA-NEXT: v_subrev_u32_e32 v5, vcc, v2, v1
+; TONGA-NEXT: v_sub_u32_e32 v5, vcc, v1, v2
; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v1, v2
; TONGA-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
; TONGA-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
@@ -2061,7 +2049,7 @@ define amdgpu_kernel void @v_sdiv_i25(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v1, v2
; TONGA-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; TONGA-NEXT: v_xor_b32_e32 v1, v1, v0
-; TONGA-NEXT: v_subrev_u32_e32 v0, vcc, v0, v1
+; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v1, v0
; TONGA-NEXT: v_bfe_i32 v0, v0, 0, 25
; TONGA-NEXT: buffer_store_dword v0, off, s[0:3], 0
; TONGA-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/select.f16.ll b/llvm/test/CodeGen/AMDGPU/select.f16.ll
index bbdfc76..da454ee 100644
--- a/llvm/test/CodeGen/AMDGPU/select.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/select.f16.ll
@@ -852,19 +852,19 @@ define amdgpu_kernel void @select_v2f16(
; GFX11-TRUE16-NEXT: s_mov_b32 s3, 0x31016000
; GFX11-TRUE16-NEXT: s_mov_b32 s22, s2
; GFX11-TRUE16-NEXT: s_mov_b32 s23, s3
-; GFX11-TRUE16-NEXT: s_mov_b32 s26, s2
-; GFX11-TRUE16-NEXT: s_mov_b32 s27, s3
; GFX11-TRUE16-NEXT: s_mov_b32 s18, s2
; GFX11-TRUE16-NEXT: s_mov_b32 s19, s3
+; GFX11-TRUE16-NEXT: s_mov_b32 s26, s2
+; GFX11-TRUE16-NEXT: s_mov_b32 s27, s3
; GFX11-TRUE16-NEXT: s_mov_b32 s6, s2
; GFX11-TRUE16-NEXT: s_mov_b32 s7, s3
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_mov_b32 s20, s12
; GFX11-TRUE16-NEXT: s_mov_b32 s21, s13
-; GFX11-TRUE16-NEXT: s_mov_b32 s24, s14
-; GFX11-TRUE16-NEXT: s_mov_b32 s25, s15
; GFX11-TRUE16-NEXT: s_mov_b32 s16, s10
; GFX11-TRUE16-NEXT: s_mov_b32 s17, s11
+; GFX11-TRUE16-NEXT: s_mov_b32 s24, s14
+; GFX11-TRUE16-NEXT: s_mov_b32 s25, s15
; GFX11-TRUE16-NEXT: buffer_load_b32 v0, off, s[20:23], 0
; GFX11-TRUE16-NEXT: buffer_load_b32 v1, off, s[16:19], 0
; GFX11-TRUE16-NEXT: buffer_load_b32 v2, off, s[24:27], 0
@@ -874,20 +874,18 @@ define amdgpu_kernel void @select_v2f16(
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(2)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1
-; GFX11-TRUE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v1.l, v0.l
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v3
+; GFX11-TRUE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v1.l, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
; GFX11-TRUE16-NEXT: v_cmp_lt_f16_e64 s0, v5.l, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v6.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v3.l, v0.l, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v2.l, v1.l, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v7.l, v1.l, s0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, s8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX11-TRUE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-TRUE16-NEXT: s_endpgm
;
@@ -1058,21 +1056,18 @@ define amdgpu_kernel void @select_v2f16_imm_a(
; GFX11-TRUE16-NEXT: buffer_load_b32 v2, off, s[20:23], 0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(2)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
-; GFX11-TRUE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, 0.5, v0.l
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v1
+; GFX11-TRUE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, 0.5, v0.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v2
; GFX11-TRUE16-NEXT: v_cmp_lt_f16_e64 s0, 0x3900, v3.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v4.l
; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v5.l, v1.l, s0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v5.l, v1.l, s0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, s4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX11-TRUE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-TRUE16-NEXT: s_endpgm
;
@@ -1236,21 +1231,18 @@ define amdgpu_kernel void @select_v2f16_imm_b(
; GFX11-TRUE16-NEXT: buffer_load_b32 v2, off, s[20:23], 0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(2)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
-; GFX11-TRUE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, 0.5, v0.l
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v1
+; GFX11-TRUE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, 0.5, v0.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v2
; GFX11-TRUE16-NEXT: v_cmp_gt_f16_e64 s0, 0x3900, v3.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v4.l
; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v5.l, v1.l, s0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v5.l, v1.l, s0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, s4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX11-TRUE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-TRUE16-NEXT: s_endpgm
;
@@ -1402,8 +1394,6 @@ define amdgpu_kernel void @select_v2f16_imm_c(
; GFX11-TRUE16-NEXT: s_mov_b32 s19, s3
; GFX11-TRUE16-NEXT: s_mov_b32 s14, s2
; GFX11-TRUE16-NEXT: s_mov_b32 s15, s3
-; GFX11-TRUE16-NEXT: s_mov_b32 s22, s2
-; GFX11-TRUE16-NEXT: s_mov_b32 s23, s3
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_mov_b32 s16, s8
; GFX11-TRUE16-NEXT: s_mov_b32 s17, s9
@@ -1411,10 +1401,10 @@ define amdgpu_kernel void @select_v2f16_imm_c(
; GFX11-TRUE16-NEXT: s_mov_b32 s13, s7
; GFX11-TRUE16-NEXT: buffer_load_b32 v0, off, s[16:19], 0
; GFX11-TRUE16-NEXT: buffer_load_b32 v1, off, s[12:15], 0
-; GFX11-TRUE16-NEXT: s_mov_b32 s20, s10
-; GFX11-TRUE16-NEXT: s_mov_b32 s21, s11
+; GFX11-TRUE16-NEXT: s_mov_b32 s12, s10
+; GFX11-TRUE16-NEXT: s_mov_b32 s13, s11
; GFX11-TRUE16-NEXT: s_mov_b32 s1, s5
-; GFX11-TRUE16-NEXT: buffer_load_b32 v2, off, s[20:23], 0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v2, off, s[12:15], 0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(2)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
@@ -1425,12 +1415,9 @@ define amdgpu_kernel void @select_v2f16_imm_c(
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_cmp_nlt_f16_e64 s0, v4.l, v3.l
; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, 0x3800, v2.l, vcc_lo
-; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x3900, v0.l, s0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, 0x3900, v0.l, s0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, s4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v0, 16, v1
-; GFX11-TRUE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
+; GFX11-TRUE16-NEXT: buffer_store_b32 v1, off, s[0:3], 0
; GFX11-TRUE16-NEXT: s_endpgm
;
; GFX11-FAKE16-LABEL: select_v2f16_imm_c:
@@ -1581,8 +1568,6 @@ define amdgpu_kernel void @select_v2f16_imm_d(
; GFX11-TRUE16-NEXT: s_mov_b32 s19, s3
; GFX11-TRUE16-NEXT: s_mov_b32 s14, s2
; GFX11-TRUE16-NEXT: s_mov_b32 s15, s3
-; GFX11-TRUE16-NEXT: s_mov_b32 s22, s2
-; GFX11-TRUE16-NEXT: s_mov_b32 s23, s3
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_mov_b32 s16, s8
; GFX11-TRUE16-NEXT: s_mov_b32 s17, s9
@@ -1590,10 +1575,10 @@ define amdgpu_kernel void @select_v2f16_imm_d(
; GFX11-TRUE16-NEXT: s_mov_b32 s13, s7
; GFX11-TRUE16-NEXT: buffer_load_b32 v0, off, s[16:19], 0
; GFX11-TRUE16-NEXT: buffer_load_b32 v1, off, s[12:15], 0
-; GFX11-TRUE16-NEXT: s_mov_b32 s20, s10
-; GFX11-TRUE16-NEXT: s_mov_b32 s21, s11
+; GFX11-TRUE16-NEXT: s_mov_b32 s12, s10
+; GFX11-TRUE16-NEXT: s_mov_b32 s13, s11
; GFX11-TRUE16-NEXT: s_mov_b32 s1, s5
-; GFX11-TRUE16-NEXT: buffer_load_b32 v2, off, s[20:23], 0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v2, off, s[12:15], 0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(2)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
@@ -1604,12 +1589,9 @@ define amdgpu_kernel void @select_v2f16_imm_d(
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_cmp_lt_f16_e64 s0, v4.l, v3.l
; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, 0x3800, v2.l, vcc_lo
-; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x3900, v0.l, s0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, 0x3900, v0.l, s0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, s4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v0, 16, v1
-; GFX11-TRUE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
+; GFX11-TRUE16-NEXT: buffer_store_b32 v1, off, s[0:3], 0
; GFX11-TRUE16-NEXT: s_endpgm
;
; GFX11-FAKE16-LABEL: select_v2f16_imm_d:
diff --git a/llvm/test/CodeGen/AMDGPU/srem.ll b/llvm/test/CodeGen/AMDGPU/srem.ll
index 5944342..bbd1793 100644
--- a/llvm/test/CodeGen/AMDGPU/srem.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem.ll
@@ -467,28 +467,28 @@ define amdgpu_kernel void @srem_v2i32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_readfirstlane_b32 s2, v2
; GCN-NEXT: s_abs_i32 s2, s2
; GCN-NEXT: v_cvt_f32_u32_e32 v2, s2
-; GCN-NEXT: v_readfirstlane_b32 s3, v0
+; GCN-NEXT: v_readfirstlane_b32 s4, v0
; GCN-NEXT: s_sub_i32 s6, 0, s2
-; GCN-NEXT: s_ashr_i32 s5, s3, 31
+; GCN-NEXT: s_ashr_i32 s5, s4, 31
; GCN-NEXT: v_rcp_iflag_f32_e32 v2, v2
-; GCN-NEXT: s_abs_i32 s3, s3
-; GCN-NEXT: v_readfirstlane_b32 s4, v3
+; GCN-NEXT: s_abs_i32 s4, s4
+; GCN-NEXT: v_readfirstlane_b32 s3, v3
; GCN-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v2
; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0
; GCN-NEXT: v_readfirstlane_b32 s7, v0
; GCN-NEXT: s_mul_i32 s6, s6, s7
; GCN-NEXT: s_mul_hi_u32 s6, s7, s6
; GCN-NEXT: s_add_i32 s7, s7, s6
-; GCN-NEXT: s_mul_hi_u32 s6, s3, s7
+; GCN-NEXT: s_mul_hi_u32 s6, s4, s7
; GCN-NEXT: s_mul_i32 s6, s6, s2
-; GCN-NEXT: s_sub_i32 s3, s3, s6
-; GCN-NEXT: s_sub_i32 s6, s3, s2
-; GCN-NEXT: s_cmp_ge_u32 s3, s2
-; GCN-NEXT: s_cselect_b32 s3, s6, s3
-; GCN-NEXT: s_sub_i32 s6, s3, s2
-; GCN-NEXT: s_cmp_ge_u32 s3, s2
-; GCN-NEXT: s_cselect_b32 s2, s6, s3
-; GCN-NEXT: s_abs_i32 s3, s4
+; GCN-NEXT: s_sub_i32 s4, s4, s6
+; GCN-NEXT: s_sub_i32 s6, s4, s2
+; GCN-NEXT: s_cmp_ge_u32 s4, s2
+; GCN-NEXT: s_cselect_b32 s4, s6, s4
+; GCN-NEXT: s_sub_i32 s6, s4, s2
+; GCN-NEXT: s_cmp_ge_u32 s4, s2
+; GCN-NEXT: s_cselect_b32 s2, s6, s4
+; GCN-NEXT: s_abs_i32 s3, s3
; GCN-NEXT: v_cvt_f32_u32_e32 v0, s3
; GCN-NEXT: s_xor_b32 s2, s2, s5
; GCN-NEXT: s_sub_i32 s7, 0, s3
diff --git a/llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll b/llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll
index 4a6202ea..6daea57 100644
--- a/llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll
@@ -788,12 +788,10 @@ define amdgpu_ps <2 x half> @s_constained_fsub_v2f16_fpexcept_strict(<2 x half>
;
; GFX11-SDAG-TRUE16-LABEL: s_constained_fsub_v2f16_fpexcept_strict:
; GFX11-SDAG-TRUE16: ; %bb.0:
-; GFX11-SDAG-TRUE16-NEXT: v_sub_f16_e64 v0.l, s2, s3
; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s0, s3, 16
; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s1, s2, 16
-; GFX11-SDAG-TRUE16-NEXT: v_sub_f16_e64 v1.l, s1, s0
-; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-SDAG-TRUE16-NEXT: v_sub_f16_e64 v0.l, s2, s3
+; GFX11-SDAG-TRUE16-NEXT: v_sub_f16_e64 v0.h, s1, s0
; GFX11-SDAG-TRUE16-NEXT: ; return to shader part epilog
;
; GFX11-SDAG-FAKE16-LABEL: s_constained_fsub_v2f16_fpexcept_strict:
diff --git a/llvm/test/CodeGen/AMDGPU/sub.v2i16.ll b/llvm/test/CodeGen/AMDGPU/sub.v2i16.ll
index cd1c532..6a273e5 100644
--- a/llvm/test/CodeGen/AMDGPU/sub.v2i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/sub.v2i16.ll
@@ -813,7 +813,7 @@ define amdgpu_kernel void @v_test_sub_v2i16_zext_to_v2i64(ptr addrspace(1) %out,
; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-TRUE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
@@ -825,11 +825,9 @@ define amdgpu_kernel void @v_test_sub_v2i16_zext_to_v2i64(ptr addrspace(1) %out,
; GFX11-TRUE16-NEXT: s_mov_b32 s2, -1
; GFX11-TRUE16-NEXT: v_pk_sub_i16 v0, v1, v0
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.l
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v2, 16, v3
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.h
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v1
; GFX11-TRUE16-NEXT: buffer_store_b128 v[0:3], off, s[0:3], 0
; GFX11-TRUE16-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/v_sat_pk_u8_i16.ll b/llvm/test/CodeGen/AMDGPU/v_sat_pk_u8_i16.ll
index c9b94e0..99b6ab7 100644
--- a/llvm/test/CodeGen/AMDGPU/v_sat_pk_u8_i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/v_sat_pk_u8_i16.ll
@@ -189,14 +189,11 @@ define amdgpu_kernel void @basic_smax_smin_sgpr(ptr addrspace(1) %out, i32 inreg
; SDAG-GFX11-TRUE16-LABEL: basic_smax_smin_sgpr:
; SDAG-GFX11-TRUE16: ; %bb.0:
; SDAG-GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; SDAG-GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, 0
+; SDAG-GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, 0
; SDAG-GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-GFX11-TRUE16-NEXT: v_med3_i16 v0.l, s2, 0, 0xff
-; SDAG-GFX11-TRUE16-NEXT: v_med3_i16 v1.l, s3, 0, 0xff
-; SDAG-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; SDAG-GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SDAG-GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; SDAG-GFX11-TRUE16-NEXT: global_store_b32 v2, v0, s[0:1]
+; SDAG-GFX11-TRUE16-NEXT: v_med3_i16 v1.l, s2, 0, 0xff
+; SDAG-GFX11-TRUE16-NEXT: v_med3_i16 v1.h, s3, 0, 0xff
+; SDAG-GFX11-TRUE16-NEXT: global_store_b32 v0, v1, s[0:1]
; SDAG-GFX11-TRUE16-NEXT: s_endpgm
;
; SDAG-GFX11-FAKE16-LABEL: basic_smax_smin_sgpr:
@@ -215,14 +212,11 @@ define amdgpu_kernel void @basic_smax_smin_sgpr(ptr addrspace(1) %out, i32 inreg
; SDAG-GFX12-TRUE16-LABEL: basic_smax_smin_sgpr:
; SDAG-GFX12-TRUE16: ; %bb.0:
; SDAG-GFX12-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; SDAG-GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, 0
+; SDAG-GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, 0
; SDAG-GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
-; SDAG-GFX12-TRUE16-NEXT: v_med3_i16 v0.l, s2, 0, 0xff
-; SDAG-GFX12-TRUE16-NEXT: v_med3_i16 v1.l, s3, 0, 0xff
-; SDAG-GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; SDAG-GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SDAG-GFX12-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; SDAG-GFX12-TRUE16-NEXT: global_store_b32 v2, v0, s[0:1]
+; SDAG-GFX12-TRUE16-NEXT: v_med3_i16 v1.l, s2, 0, 0xff
+; SDAG-GFX12-TRUE16-NEXT: v_med3_i16 v1.h, s3, 0, 0xff
+; SDAG-GFX12-TRUE16-NEXT: global_store_b32 v0, v1, s[0:1]
; SDAG-GFX12-TRUE16-NEXT: s_endpgm
;
; SDAG-GFX12-FAKE16-LABEL: basic_smax_smin_sgpr:
diff --git a/llvm/test/CodeGen/AMDGPU/vector-reduce-and.ll b/llvm/test/CodeGen/AMDGPU/vector-reduce-and.ll
index 801324e..dfc59f6 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-reduce-and.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-reduce-and.ll
@@ -1023,10 +1023,11 @@ define i16 @test_vector_reduce_and_v2i16(<2 x i16> %v) {
; GFX11-SDAG-TRUE16-LABEL: test_vector_reduce_and_v2i16:
; GFX11-SDAG-TRUE16: ; %bb.0: ; %entry
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
-; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, v0, v2
; GFX11-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-SDAG-FAKE16-LABEL: test_vector_reduce_and_v2i16:
@@ -1052,10 +1053,11 @@ define i16 @test_vector_reduce_and_v2i16(<2 x i16> %v) {
; GFX12-SDAG-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
-; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, v0, v2
; GFX12-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-SDAG-FAKE16-LABEL: test_vector_reduce_and_v2i16:
diff --git a/llvm/test/CodeGen/AMDGPU/vector-reduce-mul.ll b/llvm/test/CodeGen/AMDGPU/vector-reduce-mul.ll
index 98919f5..4d5ade4 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-reduce-mul.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-reduce-mul.ll
@@ -1024,10 +1024,11 @@ define i16 @test_vector_reduce_mul_v2i16(<2 x i16> %v) {
; GFX11-SDAG-TRUE16-LABEL: test_vector_reduce_mul_v2i16:
; GFX11-SDAG-TRUE16: ; %bb.0: ; %entry
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
-; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v2
; GFX11-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-SDAG-FAKE16-LABEL: test_vector_reduce_mul_v2i16:
@@ -1053,10 +1054,11 @@ define i16 @test_vector_reduce_mul_v2i16(<2 x i16> %v) {
; GFX12-SDAG-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
-; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v2
; GFX12-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-SDAG-FAKE16-LABEL: test_vector_reduce_mul_v2i16:
@@ -1298,11 +1300,12 @@ define i16 @test_vector_reduce_mul_v4i16(<4 x i16> %v) {
; GFX11-SDAG-TRUE16: ; %bb.0: ; %entry
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v2
; GFX11-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-SDAG-FAKE16-LABEL: test_vector_reduce_mul_v4i16:
@@ -1331,11 +1334,12 @@ define i16 @test_vector_reduce_mul_v4i16(<4 x i16> %v) {
; GFX12-SDAG-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v2
; GFX12-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-SDAG-FAKE16-LABEL: test_vector_reduce_mul_v4i16:
@@ -1468,12 +1472,13 @@ define i16 @test_vector_reduce_mul_v8i16(<8 x i16> %v) {
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v1, v1, v3
; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v2
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX11-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v2
; GFX11-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-SDAG-FAKE16-LABEL: test_vector_reduce_mul_v8i16:
@@ -1509,12 +1514,13 @@ define i16 @test_vector_reduce_mul_v8i16(<8 x i16> %v) {
; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v1, v1, v3
; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v2
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX12-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v2
; GFX12-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-SDAG-FAKE16-LABEL: test_vector_reduce_mul_v8i16:
@@ -1706,12 +1712,13 @@ define i16 @test_vector_reduce_mul_v16i16(<16 x i16> %v) {
; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v1, v1, v3
; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v2
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX11-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v2
; GFX11-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-SDAG-FAKE16-LABEL: test_vector_reduce_mul_v16i16:
@@ -1762,12 +1769,13 @@ define i16 @test_vector_reduce_mul_v16i16(<16 x i16> %v) {
; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v1, v1, v3
; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v2
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX12-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v2
; GFX12-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-SDAG-FAKE16-LABEL: test_vector_reduce_mul_v16i16:
diff --git a/llvm/test/CodeGen/AMDGPU/vector-reduce-or.ll b/llvm/test/CodeGen/AMDGPU/vector-reduce-or.ll
index bdb1c22..9e033f5 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-reduce-or.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-reduce-or.ll
@@ -1046,10 +1046,11 @@ define i16 @test_vector_reduce_or_v2i16(<2 x i16> %v) {
; GFX11-SDAG-TRUE16-LABEL: test_vector_reduce_or_v2i16:
; GFX11-SDAG-TRUE16: ; %bb.0: ; %entry
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
-; GFX11-SDAG-TRUE16-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-SDAG-TRUE16-NEXT: v_or_b32_e32 v0, v0, v2
; GFX11-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-SDAG-FAKE16-LABEL: test_vector_reduce_or_v2i16:
@@ -1075,10 +1076,11 @@ define i16 @test_vector_reduce_or_v2i16(<2 x i16> %v) {
; GFX12-SDAG-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
-; GFX12-SDAG-TRUE16-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX12-SDAG-TRUE16-NEXT: v_or_b32_e32 v0, v0, v2
; GFX12-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-SDAG-FAKE16-LABEL: test_vector_reduce_or_v2i16:
diff --git a/llvm/test/CodeGen/AMDGPU/vector-reduce-xor.ll b/llvm/test/CodeGen/AMDGPU/vector-reduce-xor.ll
index cf344ea..166e6c4 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-reduce-xor.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-reduce-xor.ll
@@ -992,10 +992,11 @@ define i16 @test_vector_reduce_xor_v2i16(<2 x i16> %v) {
; GFX11-SDAG-TRUE16-LABEL: test_vector_reduce_xor_v2i16:
; GFX11-SDAG-TRUE16: ; %bb.0: ; %entry
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
-; GFX11-SDAG-TRUE16-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-SDAG-TRUE16-NEXT: v_xor_b32_e32 v0, v0, v2
; GFX11-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-SDAG-FAKE16-LABEL: test_vector_reduce_xor_v2i16:
@@ -1021,10 +1022,11 @@ define i16 @test_vector_reduce_xor_v2i16(<2 x i16> %v) {
; GFX12-SDAG-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
-; GFX12-SDAG-TRUE16-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX12-SDAG-TRUE16-NEXT: v_xor_b32_e32 v0, v0, v2
; GFX12-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-SDAG-FAKE16-LABEL: test_vector_reduce_xor_v2i16:
diff --git a/llvm/test/CodeGen/AMDGPU/vector_rebroadcast.ll b/llvm/test/CodeGen/AMDGPU/vector_rebroadcast.ll
index 07e9325..5045540 100644
--- a/llvm/test/CodeGen/AMDGPU/vector_rebroadcast.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector_rebroadcast.ll
@@ -455,10 +455,7 @@ define <2 x i16> @shuffle_v2i16_rebroadcast(ptr addrspace(1) %arg0) {
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: global_load_b32 v0, v[0:1], off
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: shuffle_v2i16_rebroadcast:
@@ -499,10 +496,8 @@ define <4 x i16> @shuffle_v4i16_rebroadcast(ptr addrspace(1) %arg0) {
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: global_load_b32 v0, v[0:1], off
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -550,10 +545,8 @@ define <8 x i16> @shuffle_v8i16_rebroadcast(ptr addrspace(1) %arg0) {
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: global_load_b32 v0, v[0:1], off
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v0
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v0
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v0
@@ -613,10 +606,8 @@ define <16 x i16> @shuffle_v16i16_rebroadcast(ptr addrspace(1) %arg0) {
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: global_load_b32 v0, v[0:1], off
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v0
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v0
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v0
@@ -700,10 +691,8 @@ define <32 x i16> @shuffle_v32i16_rebroadcast(ptr addrspace(1) %arg0) {
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: global_load_b32 v0, v[0:1], off
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v0
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v0
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v0
diff --git a/llvm/test/CodeGen/AMDGPU/vector_shuffle.packed.ll b/llvm/test/CodeGen/AMDGPU/vector_shuffle.packed.ll
index b01e92d..6bf6d54 100644
--- a/llvm/test/CodeGen/AMDGPU/vector_shuffle.packed.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector_shuffle.packed.ll
@@ -1288,9 +1288,8 @@ define <4 x i16> @shuffle_v4i16_2356(ptr addrspace(1) %arg0, ptr addrspace(1) %a
; GFX11-TRUE16-NEXT: global_load_b64 v[2:3], v[2:3], off
; GFX11-TRUE16-NEXT: global_load_b32 v0, v[0:1], off offset:4
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v3.l
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -2571,10 +2570,9 @@ define <2 x i16> @i16_hi16low16bits(ptr addrspace(1) %x0, ptr addrspace(1) %x1)
; GFX11-TRUE16-NEXT: global_load_b32 v0, v[0:1], off
; GFX11-TRUE16-NEXT: global_load_b32 v1, v[2:3], off
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: i16_hi16low16bits:
@@ -2626,14 +2624,10 @@ define <2 x i16> @i16_hi16bits(ptr addrspace(1) %x0, ptr addrspace(1) %x1) {
; GFX11-TRUE16-LABEL: i16_hi16bits:
; GFX11-TRUE16: ; %bb.0: ; %entry
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: global_load_b32 v2, v[2:3], off
-; GFX11-TRUE16-NEXT: global_load_b32 v0, v[0:1], off
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.h
+; GFX11-TRUE16-NEXT: global_load_b32 v1, v[0:1], off
+; GFX11-TRUE16-NEXT: global_load_b32 v0, v[2:3], off
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.h
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: i16_hi16bits:
diff --git a/llvm/test/CodeGen/RISCV/div_minsize.ll b/llvm/test/CodeGen/RISCV/div_minsize.ll
index 601821b..794af2f 100644
--- a/llvm/test/CodeGen/RISCV/div_minsize.ll
+++ b/llvm/test/CodeGen/RISCV/div_minsize.ll
@@ -68,3 +68,151 @@ define i32 @testsize4(i32 %x) minsize nounwind {
%div = udiv i32 %x, 33
ret i32 %div
}
+
+define i128 @i128_sdiv(i128 %arg0) minsize nounwind {
+; RV32IM-LABEL: i128_sdiv:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: lw a2, 12(a1)
+; RV32IM-NEXT: lw a3, 8(a1)
+; RV32IM-NEXT: lw a4, 0(a1)
+; RV32IM-NEXT: lw a1, 4(a1)
+; RV32IM-NEXT: srai a5, a2, 31
+; RV32IM-NEXT: srli a5, a5, 30
+; RV32IM-NEXT: add a5, a4, a5
+; RV32IM-NEXT: sltu a4, a5, a4
+; RV32IM-NEXT: srli a5, a5, 2
+; RV32IM-NEXT: add a6, a1, a4
+; RV32IM-NEXT: sltu a1, a6, a1
+; RV32IM-NEXT: and a1, a4, a1
+; RV32IM-NEXT: srli a4, a6, 2
+; RV32IM-NEXT: slli a6, a6, 30
+; RV32IM-NEXT: or a5, a5, a6
+; RV32IM-NEXT: add a1, a3, a1
+; RV32IM-NEXT: srli a6, a1, 2
+; RV32IM-NEXT: sltu a3, a1, a3
+; RV32IM-NEXT: slli a1, a1, 30
+; RV32IM-NEXT: add a2, a2, a3
+; RV32IM-NEXT: or a1, a4, a1
+; RV32IM-NEXT: slli a3, a2, 30
+; RV32IM-NEXT: srai a2, a2, 2
+; RV32IM-NEXT: or a3, a6, a3
+; RV32IM-NEXT: sw a5, 0(a0)
+; RV32IM-NEXT: sw a1, 4(a0)
+; RV32IM-NEXT: sw a3, 8(a0)
+; RV32IM-NEXT: sw a2, 12(a0)
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: i128_sdiv:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: addi sp, sp, -16
+; RV64IM-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IM-NEXT: li a2, 4
+; RV64IM-NEXT: li a3, 0
+; RV64IM-NEXT: call __divti3
+; RV64IM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IM-NEXT: addi sp, sp, 16
+; RV64IM-NEXT: ret
+ %div = sdiv i128 %arg0, 4
+ ret i128 %div
+}
+
+define i256 @i256_sdiv(i256 %arg0) minsize nounwind {
+; RV32IM-LABEL: i256_sdiv:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: lw a5, 16(a1)
+; RV32IM-NEXT: lw a4, 20(a1)
+; RV32IM-NEXT: lw a2, 24(a1)
+; RV32IM-NEXT: lw a3, 28(a1)
+; RV32IM-NEXT: lw a6, 0(a1)
+; RV32IM-NEXT: lw a7, 4(a1)
+; RV32IM-NEXT: lw t0, 8(a1)
+; RV32IM-NEXT: lw t1, 12(a1)
+; RV32IM-NEXT: srai a1, a3, 31
+; RV32IM-NEXT: srli a1, a1, 30
+; RV32IM-NEXT: add a1, a6, a1
+; RV32IM-NEXT: sltu t2, a1, a6
+; RV32IM-NEXT: add a6, a7, t2
+; RV32IM-NEXT: sltu a7, a6, a7
+; RV32IM-NEXT: and t2, t2, a7
+; RV32IM-NEXT: add a7, t0, t2
+; RV32IM-NEXT: sltu t3, a7, t0
+; RV32IM-NEXT: add t0, t1, t3
+; RV32IM-NEXT: beqz t2, .LBB5_2
+; RV32IM-NEXT: # %bb.1:
+; RV32IM-NEXT: sltu t1, t0, t1
+; RV32IM-NEXT: and t2, t3, t1
+; RV32IM-NEXT: .LBB5_2:
+; RV32IM-NEXT: add t2, a5, t2
+; RV32IM-NEXT: srli t1, t0, 2
+; RV32IM-NEXT: srli t3, a7, 2
+; RV32IM-NEXT: slli t0, t0, 30
+; RV32IM-NEXT: slli a7, a7, 30
+; RV32IM-NEXT: or t0, t3, t0
+; RV32IM-NEXT: srli t3, a6, 2
+; RV32IM-NEXT: srli a1, a1, 2
+; RV32IM-NEXT: slli a6, a6, 30
+; RV32IM-NEXT: sltu a5, t2, a5
+; RV32IM-NEXT: or a7, t3, a7
+; RV32IM-NEXT: srli t3, t2, 2
+; RV32IM-NEXT: slli t2, t2, 30
+; RV32IM-NEXT: or a1, a1, a6
+; RV32IM-NEXT: add a6, a4, a5
+; RV32IM-NEXT: or t1, t1, t2
+; RV32IM-NEXT: sltu a4, a6, a4
+; RV32IM-NEXT: srli t2, a6, 2
+; RV32IM-NEXT: slli a6, a6, 30
+; RV32IM-NEXT: sw a1, 0(a0)
+; RV32IM-NEXT: sw a7, 4(a0)
+; RV32IM-NEXT: sw t0, 8(a0)
+; RV32IM-NEXT: sw t1, 12(a0)
+; RV32IM-NEXT: and a4, a5, a4
+; RV32IM-NEXT: or a1, t3, a6
+; RV32IM-NEXT: add a4, a2, a4
+; RV32IM-NEXT: srli a5, a4, 2
+; RV32IM-NEXT: sltu a2, a4, a2
+; RV32IM-NEXT: slli a4, a4, 30
+; RV32IM-NEXT: add a2, a3, a2
+; RV32IM-NEXT: or a3, t2, a4
+; RV32IM-NEXT: slli a4, a2, 30
+; RV32IM-NEXT: srai a2, a2, 2
+; RV32IM-NEXT: or a4, a5, a4
+; RV32IM-NEXT: sw a1, 16(a0)
+; RV32IM-NEXT: sw a3, 20(a0)
+; RV32IM-NEXT: sw a4, 24(a0)
+; RV32IM-NEXT: sw a2, 28(a0)
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: i256_sdiv:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: ld a2, 24(a1)
+; RV64IM-NEXT: ld a3, 16(a1)
+; RV64IM-NEXT: ld a4, 0(a1)
+; RV64IM-NEXT: ld a1, 8(a1)
+; RV64IM-NEXT: srai a5, a2, 63
+; RV64IM-NEXT: srli a5, a5, 62
+; RV64IM-NEXT: add a5, a4, a5
+; RV64IM-NEXT: sltu a4, a5, a4
+; RV64IM-NEXT: srli a5, a5, 2
+; RV64IM-NEXT: add a6, a1, a4
+; RV64IM-NEXT: sltu a1, a6, a1
+; RV64IM-NEXT: and a1, a4, a1
+; RV64IM-NEXT: srli a4, a6, 2
+; RV64IM-NEXT: slli a6, a6, 62
+; RV64IM-NEXT: or a5, a5, a6
+; RV64IM-NEXT: add a1, a3, a1
+; RV64IM-NEXT: srli a6, a1, 2
+; RV64IM-NEXT: sltu a3, a1, a3
+; RV64IM-NEXT: slli a1, a1, 62
+; RV64IM-NEXT: add a2, a2, a3
+; RV64IM-NEXT: or a1, a4, a1
+; RV64IM-NEXT: slli a3, a2, 62
+; RV64IM-NEXT: srai a2, a2, 2
+; RV64IM-NEXT: or a3, a6, a3
+; RV64IM-NEXT: sd a5, 0(a0)
+; RV64IM-NEXT: sd a1, 8(a0)
+; RV64IM-NEXT: sd a3, 16(a0)
+; RV64IM-NEXT: sd a2, 24(a0)
+; RV64IM-NEXT: ret
+ %div = sdiv i256 %arg0, 4
+ ret i256 %div
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll b/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll
new file mode 100644
index 0000000..489323b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll
@@ -0,0 +1,186 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ iXLen, iXLen);
+
+declare <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ iXLen);
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @test_half_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2, <vscale x 1 x half> %3, <vscale x 1 x half> %4, ptr %ptr) nounwind {
+; CHECK-LABEL: test_half_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a2, 0
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfadd.vv v10, v10, v11
+; CHECK-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v9
+; CHECK-NEXT: fsrm a2
+; CHECK-NEXT: vse16.v v10, (a1)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
+ <vscale x 1 x half> poison,
+ <vscale x 1 x half> %3,
+ <vscale x 1 x half> %4,
+ iXLen 0, iXLen %2)
+
+ %b = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ call void @llvm.riscv.vse(<vscale x 1 x half> %a, ptr %ptr, iXLen %2)
+
+ ret <vscale x 1 x bfloat> %b
+}
+
+define <vscale x 1 x bfloat> @test_i32_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2, <vscale x 1 x i32> %3, <vscale x 1 x i32> %4, ptr %ptr) nounwind {
+; CHECK-LABEL: test_i32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vadd.vv v10, v10, v11
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v9
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: vse32.v v10, (a1)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> poison,
+ <vscale x 1 x i32> %3,
+ <vscale x 1 x i32> %4,
+ iXLen %2)
+
+ %b = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ call void @llvm.riscv.vse(<vscale x 1 x i32> %a, ptr %ptr, iXLen %2)
+
+ ret <vscale x 1 x bfloat> %b
+}
+
+define <vscale x 1 x bfloat> @test_half_bf16_half(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2, <vscale x 1 x half> %3, <vscale x 1 x half> %4, ptr %ptr) nounwind {
+; CHECK-LABEL: test_half_bf16_half:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a2, 0
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfadd.vv v10, v10, v11
+; CHECK-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfadd.vv v9, v10, v11
+; CHECK-NEXT: fsrm a2
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vse16.v v9, (a1)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
+ <vscale x 1 x half> poison,
+ <vscale x 1 x half> %3,
+ <vscale x 1 x half> %4,
+ iXLen 0, iXLen %2)
+
+ %b = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ %c = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
+ <vscale x 1 x half> poison,
+ <vscale x 1 x half> %a,
+ <vscale x 1 x half> %4,
+ iXLen 0, iXLen %2)
+
+ store <vscale x 1 x half> %c, ptr %ptr
+
+ ret <vscale x 1 x bfloat> %b
+}
+
+define <vscale x 1 x bfloat> @test_bf16_half_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2, <vscale x 1 x half> %3, <vscale x 1 x half> %4, ptr %ptr) nounwind {
+; CHECK-LABEL: test_bf16_half_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a2, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfadd.vv v10, v10, v11
+; CHECK-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v9
+; CHECK-NEXT: fsrm a2
+; CHECK-NEXT: vsetvli a0, zero, e16alt, mf4, ta, ma
+; CHECK-NEXT: vse16.v v10, (a1)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ %b = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
+ <vscale x 1 x half> poison,
+ <vscale x 1 x half> %3,
+ <vscale x 1 x half> %4,
+ iXLen 0, iXLen %2)
+
+ %c = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %a,
+ <vscale x 1 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ store <vscale x 1 x half> %b, ptr %ptr
+
+ ret <vscale x 1 x bfloat> %c
+}
+
+define <vscale x 1 x bfloat> @test_bf16_i16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2, <vscale x 1 x i16> %3, <vscale x 1 x i16> %4, ptr %ptr) nounwind {
+; CHECK-LABEL: test_bf16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a2, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v9
+; CHECK-NEXT: vadd.vv v9, v10, v11
+; CHECK-NEXT: fsrm a2
+; CHECK-NEXT: vsetvli a0, zero, e16alt, mf4, ta, ma
+; CHECK-NEXT: vse16.v v9, (a1)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ %b = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> poison,
+ <vscale x 1 x i16> %3,
+ <vscale x 1 x i16> %4,
+ iXLen %2)
+
+ store <vscale x 1 x i16> %b, ptr %ptr
+
+ ret <vscale x 1 x bfloat> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll
new file mode 100644
index 0000000..db1b081
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll
@@ -0,0 +1,607 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfadd_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfadd_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfadd_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfadd_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfadd_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfadd_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfadd_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x bfloat> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x bfloat> %2,
+ <vscale x 32 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfadd_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfadd_mask_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfadd_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfadd_mask_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfadd_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfadd_mask_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfadd_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfadd_mask_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfadd_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfadd_mask_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfadd_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ <vscale x 32 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfadd_mask_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, bfloat %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ bfloat %2,
+ <vscale x 32 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll
new file mode 100644
index 0000000..d7d49b3
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll
@@ -0,0 +1,294 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16.nxv1bf16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vfclass_v_nxv1i16_nxv1bf16(
+; CHECK-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: ret
+ <vscale x 1 x bfloat> %0,
+ iXLen %1) nounwind {
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16.nxv1bf16(
+ <vscale x 1 x i16> poison,
+ <vscale x 1 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16.nxv1bf16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vfclass_mask_v_nxv1i16_nxv1bf16(
+; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfclass.v v8, v9, v0.t
+; CHECK-NEXT: ret
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3) nounwind {
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16.nxv1bf16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2i16.nxv2bf16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vfclass_v_nxv2i16_nxv2bf16(
+; CHECK-LABEL: intrinsic_vfclass_v_nxv2i16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: ret
+ <vscale x 2 x bfloat> %0,
+ iXLen %1) nounwind {
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2i16.nxv2bf16(
+ <vscale x 2 x i16> poison,
+ <vscale x 2 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2i16.nxv2bf16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vfclass_mask_v_nxv2i16_nxv2bf16(
+; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfclass.v v8, v9, v0.t
+; CHECK-NEXT: ret
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3) nounwind {
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2i16.nxv2bf16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4i16.nxv4bf16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vfclass_v_nxv4i16_nxv4bf16(
+; CHECK-LABEL: intrinsic_vfclass_v_nxv4i16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: ret
+ <vscale x 4 x bfloat> %0,
+ iXLen %1) nounwind {
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4i16.nxv4bf16(
+ <vscale x 4 x i16> poison,
+ <vscale x 4 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4i16.nxv4bf16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vfclass_mask_v_nxv4i16_nxv4bf16(
+; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfclass.v v8, v9, v0.t
+; CHECK-NEXT: ret
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3) nounwind {
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4i16.nxv4bf16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8i16.nxv8bf16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vfclass_v_nxv8i16_nxv8bf16(
+; CHECK-LABEL: intrinsic_vfclass_v_nxv8i16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: ret
+ <vscale x 8 x bfloat> %0,
+ iXLen %1) nounwind {
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8i16.nxv8bf16(
+ <vscale x 8 x i16> poison,
+ <vscale x 8 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8i16.nxv8bf16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vfclass_mask_v_nxv8i16_nxv8bf16(
+; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfclass.v v8, v10, v0.t
+; CHECK-NEXT: ret
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3) nounwind {
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8i16.nxv8bf16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16i16.nxv16bf16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vfclass_v_nxv16i16_nxv16bf16(
+; CHECK-LABEL: intrinsic_vfclass_v_nxv16i16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: ret
+ <vscale x 16 x bfloat> %0,
+ iXLen %1) nounwind {
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16i16.nxv16bf16(
+ <vscale x 16 x i16> poison,
+ <vscale x 16 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16i16.nxv16bf16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vfclass_mask_v_nxv16i16_nxv16bf16(
+; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfclass.v v8, v12, v0.t
+; CHECK-NEXT: ret
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x i1> %2,
+ iXLen %3) nounwind {
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16i16.nxv16bf16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x i1> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32i16.nxv32bf16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x bfloat>,
+ iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vfclass_v_nxv32i16_nxv32bf16(
+; CHECK-LABEL: intrinsic_vfclass_v_nxv32i16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: ret
+ <vscale x 32 x bfloat> %0,
+ iXLen %1) nounwind {
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32i16.nxv32bf16(
+ <vscale x 32 x i16> poison,
+ <vscale x 32 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32i16.nxv32bf16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vfclass_mask_v_nxv32i16_nxv32bf16(
+; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv32i16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, tu, mu
+; CHECK-NEXT: vfclass.v v8, v16, v0.t
+; CHECK-NEXT: ret
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x i1> %2,
+ iXLen %3) nounwind {
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32i16.nxv32bf16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x i1> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 32 x i16> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll
new file mode 100644
index 0000000..13821d7
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll
@@ -0,0 +1,553 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfmacc.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmacc_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmacc_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfmacc.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmacc_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmacc_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmacc.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmacc_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmacc_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfmacc.vv v8, v10, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmacc_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfmacc.vv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmacc_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfmacc.vv v8, v12, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmacc_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfmacc.vv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmacc_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfmacc.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmacc_mask_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmacc_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfmacc.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmacc_mask_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmacc_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmacc.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmacc_mask_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmacc_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfmacc.vf v8, fa0, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmacc_mask_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfmacc.vf v8, fa0, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmacc_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfmacc.vf v8, fa0, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmacc_mask_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfmacc.vf v8, fa0, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v10, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %2,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v10, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmacc_vf_nxv1bf16_bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1bf16_bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmadd.vf v8, fa0, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %2,
+ bfloat %1,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll
new file mode 100644
index 0000000..09fc199
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll
@@ -0,0 +1,553 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfmadd.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfmadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmadd_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfmadd.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfmadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmadd_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmadd.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfmadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmadd_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfmadd.vv v8, v10, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfmadd.vv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmadd_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfmadd.vv v8, v12, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfmadd.vv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmadd_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfmadd.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmadd_mask_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmadd_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfmadd.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmadd_mask_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmadd_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmadd.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmadd_mask_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmadd_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfmadd.vf v8, fa0, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmadd_mask_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmadd_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfmadd.vf v8, fa0, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmadd_mask_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %2,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmacc.vv v8, v10, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmadd_vf_nxv1bf16_bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1bf16_bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmacc.vf v8, fa0, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %2,
+ bfloat %1,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll
new file mode 100644
index 0000000..a337d30
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll
@@ -0,0 +1,571 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmax_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmax_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmax_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmax_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmax_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmax_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmax_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmax_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfmax.vv v8, v10, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmax_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmax_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfmax.vv v8, v12, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmax_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmax_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x bfloat> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu
+; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x bfloat> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmax_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmax.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmax_mask_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmax_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfmax.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmax_mask_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmax_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfmax.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmax_mask_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmax_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfmax.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmax_mask_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfmax.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmax_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfmax.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmax_mask_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfmax.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmax_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfmax.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmax_mask_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, bfloat %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT: vfmax.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ bfloat %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll
new file mode 100644
index 0000000..86ba7c7
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll
@@ -0,0 +1,258 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmerge.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmerge_vfm_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmerge.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmerge.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmerge_vfm_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmerge.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmerge.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmerge_vfm_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmerge.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmerge.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmerge_vfm_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmerge.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmerge.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmerge_vfm_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmerge.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x i1> %2,
+ iXLen %3)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmerge.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ <vscale x 32 x i1>,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmerge_vfm_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, bfloat %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmerge.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ bfloat %1,
+ <vscale x 32 x i1> %2,
+ iXLen %3)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmerge_vzm_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x fa5, zero
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmerge.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat zeroinitializer,
+ <vscale x 1 x i1> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 2 x bfloat> @intrinsic_vfmerge_vzm_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x fa5, zero
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmerge.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat zeroinitializer,
+ <vscale x 2 x i1> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+define <vscale x 4 x bfloat> @intrinsic_vfmerge_vzm_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x fa5, zero
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmerge.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat zeroinitializer,
+ <vscale x 4 x i1> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+define <vscale x 8 x bfloat> @intrinsic_vfmerge_vzm_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x fa5, zero
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmerge.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat zeroinitializer,
+ <vscale x 8 x i1> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+define <vscale x 16 x bfloat> @intrinsic_vfmerge_vzm_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x fa5, zero
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmerge.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat zeroinitializer,
+ <vscale x 16 x i1> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 32 x bfloat> @intrinsic_vfmerge_vzm_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x fa5, zero
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmerge.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ bfloat zeroinitializer,
+ <vscale x 32 x i1> %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll
new file mode 100644
index 0000000..37c0cf5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll
@@ -0,0 +1,571 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmin_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmin.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmin_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmin_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfmin.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmin_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmin_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfmin.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmin_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmin_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfmin.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmin_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfmin.vv v8, v10, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmin_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfmin.vv v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmin_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfmin.vv v8, v12, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmin_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfmin.vv v8, v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmin_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x bfloat> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu
+; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x bfloat> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmin_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmin.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmin_mask_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmin_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfmin.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmin_mask_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmin_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfmin.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmin_mask_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmin_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfmin.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmin_mask_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfmin.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmin_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfmin.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmin_mask_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfmin.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmin_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfmin.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmin_mask_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, bfloat %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT: vfmin.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ bfloat %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll
new file mode 100644
index 0000000..948d219
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll
@@ -0,0 +1,553 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfmsac.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsac_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfmsac.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmsac_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfmsac.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmsac_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfmsac.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmsac_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmsac.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmsac_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfmsac.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmsac_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfmsac.vv v8, v10, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmsac_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfmsac.vv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmsac_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfmsac.vv v8, v12, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmsac_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfmsac.vv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsac_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfmsac.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsac_mask_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmsac_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfmsac.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmsac_mask_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmsac_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmsac.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmsac_mask_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmsac_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfmsac.vf v8, fa0, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmsac_mask_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfmsac.vf v8, fa0, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmsac_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfmsac.vf v8, fa0, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmsac_mask_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfmsac.vf v8, fa0, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmsub.vv v8, v10, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %2,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmsub.vv v8, v10, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsac_vf_nxv1bf16_bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1bf16_bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmsub.vf v8, fa0, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %2,
+ bfloat %1,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll
new file mode 100644
index 0000000..6838f37
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll
@@ -0,0 +1,553 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfmsub.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfmsub.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmsub_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfmsub.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfmsub.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmsub_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmsub.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfmsub.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmsub_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfmsub.vv v8, v10, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfmsub.vv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmsub_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfmsub.vv v8, v12, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfmsub.vv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsub_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfmsub.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsub_mask_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmsub_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfmsub.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmsub_mask_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmsub_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmsub.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmsub_mask_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmsub_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfmsub.vf v8, fa0, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmsub_mask_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfmsub.vf v8, fa0, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmsub_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfmsub.vf v8, fa0, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmsub_mask_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfmsub.vf v8, fa0, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmsub.vv v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %2,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmsac.vv v8, v10, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsub_vf_nxv1bf16_bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1bf16_bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmsac.vf v8, fa0, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %2,
+ bfloat %1,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll
new file mode 100644
index 0000000..44bce72
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll
@@ -0,0 +1,607 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmul_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmul_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmul_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmul_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmul_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmul_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmul_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v8, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmul_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfmul.vv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmul_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v8, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmul_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfmul.vv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmul_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v8, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmul_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x bfloat> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu
+; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x bfloat> %2,
+ <vscale x 32 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmul_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmul.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmul_mask_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmul_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfmul.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmul_mask_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmul_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfmul.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmul_mask_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmul_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfmul.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmul_mask_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfmul.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmul_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfmul.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmul_mask_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfmul.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmul_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfmul.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ <vscale x 32 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmul_mask_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, bfloat %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT: vfmul.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ bfloat %2,
+ <vscale x 32 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv-bf-s.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv-bf-s.ll
new file mode 100644
index 0000000..fbc73119
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv-bf-s.ll
@@ -0,0 +1,88 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+d,+v,+experimental-zvfbfa -target-abi lp64d -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+d,+v,+experimental-zvfbfa -target-abi ilp32d -verify-machineinstrs < %s | FileCheck %s
+
+declare bfloat @llvm.riscv.vfmv.f.s.nxv1bf16(<vscale x 1 x bfloat>)
+
+define bfloat @intrinsic_vfmv.f.s_s_nxv1bf16(<vscale x 1 x bfloat> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: fmv.h.x fa0, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call bfloat @llvm.riscv.vfmv.f.s.nxv1bf16(<vscale x 1 x bfloat> %0)
+ ret bfloat %a
+}
+
+declare bfloat @llvm.riscv.vfmv.f.s.nxv2bf16(<vscale x 2 x bfloat>)
+
+define bfloat @intrinsic_vfmv.f.s_s_nxv2bf16(<vscale x 2 x bfloat> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: fmv.h.x fa0, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call bfloat @llvm.riscv.vfmv.f.s.nxv2bf16(<vscale x 2 x bfloat> %0)
+ ret bfloat %a
+}
+
+declare bfloat @llvm.riscv.vfmv.f.s.nxv4bf16(<vscale x 4 x bfloat>)
+
+define bfloat @intrinsic_vfmv.f.s_s_nxv4bf16(<vscale x 4 x bfloat> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: fmv.h.x fa0, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call bfloat @llvm.riscv.vfmv.f.s.nxv4bf16(<vscale x 4 x bfloat> %0)
+ ret bfloat %a
+}
+
+declare bfloat @llvm.riscv.vfmv.f.s.nxv8bf16(<vscale x 8 x bfloat>)
+
+define bfloat @intrinsic_vfmv.f.s_s_nxv8bf16(<vscale x 8 x bfloat> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: fmv.h.x fa0, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call bfloat @llvm.riscv.vfmv.f.s.nxv8bf16(<vscale x 8 x bfloat> %0)
+ ret bfloat %a
+}
+
+declare bfloat @llvm.riscv.vfmv.f.s.nxv16bf16(<vscale x 16 x bfloat>)
+
+define bfloat @intrinsic_vfmv.f.s_s_nxv16bf16(<vscale x 16 x bfloat> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: fmv.h.x fa0, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call bfloat @llvm.riscv.vfmv.f.s.nxv16bf16(<vscale x 16 x bfloat> %0)
+ ret bfloat %a
+}
+
+declare bfloat @llvm.riscv.vfmv.f.s.nxv32bf16(<vscale x 32 x bfloat>)
+
+define bfloat @intrinsic_vfmv.f.s_s_nxv32bf16(<vscale x 32 x bfloat> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: fmv.h.x fa0, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call bfloat @llvm.riscv.vfmv.f.s.nxv32bf16(<vscale x 32 x bfloat> %0)
+ ret bfloat %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv-s-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv-s-bf.ll
new file mode 100644
index 0000000..a810809
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv-s-bf.ll
@@ -0,0 +1,161 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmv.s.f.nxv1bf16(<vscale x 1 x bfloat>, bfloat, iXLen)
+
+define <vscale x 1 x bfloat> @intrinsic_vfmv.s.f_f_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmv.s.f v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.s.f.nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2)
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmv.s.f.nxv2bf16(<vscale x 2 x bfloat>, bfloat, iXLen)
+
+define <vscale x 2 x bfloat> @intrinsic_vfmv.s.f_f_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmv.s.f v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.s.f.nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2)
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmv.s.f.nxv4bf16(<vscale x 4 x bfloat>, bfloat, iXLen)
+
+define <vscale x 4 x bfloat> @intrinsic_vfmv.s.f_f_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmv.s.f v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.s.f.nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2)
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmv.s.f.nxv8bf16(<vscale x 8 x bfloat>, bfloat, iXLen)
+
+define <vscale x 8 x bfloat> @intrinsic_vfmv.s.f_f_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmv.s.f v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.s.f.nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2)
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmv.s.f.nxv16bf16(<vscale x 16 x bfloat>, bfloat, iXLen)
+
+define <vscale x 16 x bfloat> @intrinsic_vfmv.s.f_f_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmv.s.f v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.s.f.nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2)
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmv.s.f.nxv32bf16(<vscale x 32 x bfloat>, bfloat, iXLen)
+
+define <vscale x 32 x bfloat> @intrinsic_vfmv.s.f_f_nxv32bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmv.s.f v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.s.f.nxv32bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2)
+ ret <vscale x 32 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmv.s.f_f_zero_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vmv.s.x v8, zero
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.s.f.nxv1bf16(<vscale x 1 x bfloat> %0, bfloat 0.0, iXLen %1)
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 2 x bfloat> @intrinsic_vfmv.s.f_f_zero_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vmv.s.x v8, zero
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.s.f.nxv2bf16(<vscale x 2 x bfloat> %0, bfloat 0.0, iXLen %1)
+ ret <vscale x 2 x bfloat> %a
+}
+
+define <vscale x 4 x bfloat> @intrinsic_vfmv.s.f_f_zero_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vmv.s.x v8, zero
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.s.f.nxv4bf16(<vscale x 4 x bfloat> %0, bfloat 0.0, iXLen %1)
+ ret <vscale x 4 x bfloat> %a
+}
+
+define <vscale x 8 x bfloat> @intrinsic_vfmv.s.f_f_zero_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vmv.s.x v8, zero
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.s.f.nxv8bf16(<vscale x 8 x bfloat> %0, bfloat 0.0, iXLen %1)
+ ret <vscale x 8 x bfloat> %a
+}
+
+define <vscale x 16 x bfloat> @intrinsic_vfmv.s.f_f_zero_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vmv.s.x v8, zero
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.s.f.nxv16bf16(<vscale x 16 x bfloat> %0, bfloat 0.0, iXLen %1)
+ ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 32 x bfloat> @intrinsic_vfmv.s.f_f_zero_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vmv.s.x v8, zero
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.s.f.nxv32bf16(<vscale x 32 x bfloat> %0, bfloat 0.0, iXLen %1)
+ ret <vscale x 32 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmv.s.f_f_nxv1bf16_negzero(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1bf16_negzero:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a1, 1048568
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vmv.s.x v8, a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.s.f.nxv1bf16(<vscale x 1 x bfloat> %0, bfloat -0.0, iXLen %1)
+ ret <vscale x 1 x bfloat> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll
new file mode 100644
index 0000000..f3293dd
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll
@@ -0,0 +1,216 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmv.v.f.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmv.v.f_f_nxv1bf16(bfloat %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmv.v.f v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.v.f.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ bfloat %0,
+ iXLen %1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmv.v.f.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmv.v.f_f_nxv2bf16(bfloat %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfmv.v.f v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.v.f.nxv2bf16(
+ <vscale x 2 x bfloat> poison,
+ bfloat %0,
+ iXLen %1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmv.v.f.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmv.v.f_f_nxv4bf16(bfloat %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfmv.v.f v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.v.f.nxv4bf16(
+ <vscale x 4 x bfloat> poison,
+ bfloat %0,
+ iXLen %1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmv.v.f.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmv.v.f_f_nxv8bf16(bfloat %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfmv.v.f v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.v.f.nxv8bf16(
+ <vscale x 8 x bfloat> poison,
+ bfloat %0,
+ iXLen %1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmv.v.f.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmv.v.f_f_nxv16bf16(bfloat %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfmv.v.f v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.v.f.nxv16bf16(
+ <vscale x 16 x bfloat> poison,
+ bfloat %0,
+ iXLen %1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmv.v.f.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmv.v.f_f_nxv32bf16(bfloat %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfmv.v.f v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.v.f.nxv32bf16(
+ <vscale x 32 x bfloat> poison,
+ bfloat %0,
+ iXLen %1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmv.v.f_zero_nxv1bf16(iXLen %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.v.f.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ bfloat 0.0,
+ iXLen %0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 2 x bfloat> @intrinsic_vmv.v.i_zero_nxv2bf16(iXLen %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.v.f.nxv2bf16(
+ <vscale x 2 x bfloat> poison,
+ bfloat 0.0,
+ iXLen %0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+define <vscale x 4 x bfloat> @intrinsic_vmv.v.i_zero_nxv4bf16(iXLen %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.v.f.nxv4bf16(
+ <vscale x 4 x bfloat> poison,
+ bfloat 0.0,
+ iXLen %0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+define <vscale x 8 x bfloat> @intrinsic_vmv.v.i_zero_nxv8bf16(iXLen %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.v.f.nxv8bf16(
+ <vscale x 8 x bfloat> poison,
+ bfloat 0.0,
+ iXLen %0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+define <vscale x 16 x bfloat> @intrinsic_vmv.v.i_zero_nxv16bf16(iXLen %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.v.f.nxv16bf16(
+ <vscale x 16 x bfloat> poison,
+ bfloat 0.0,
+ iXLen %0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 32 x bfloat> @intrinsic_vmv.v.i_zero_nxv32bf16(iXLen %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.v.f.nxv32bf16(
+ <vscale x 32 x bfloat> poison,
+ bfloat 0.0,
+ iXLen %0)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll
new file mode 100644
index 0000000..7d587fd
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll
@@ -0,0 +1,226 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x float>,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfncvt_rod.f.f.w_nxv1bf16_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1bf16_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rod.f.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x float> %0,
+ iXLen %1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfncvt_mask_rod.f.f.w_nxv1bf16_nxv1f32(<vscale x 1 x bfloat> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1bf16_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfncvt.rod.f.f.w v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x float>,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfncvt_rod.f.f.w_nxv2bf16_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2bf16_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rod.f.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x float> %0,
+ iXLen %1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfncvt_mask_rod.f.f.w_nxv2bf16_nxv2f32(<vscale x 2 x bfloat> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2bf16_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfncvt.rod.f.f.w v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x float>,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfncvt_rod.f.f.w_nxv4bf16_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4bf16_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfncvt.rod.f.f.w v10, v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x float> %0,
+ iXLen %1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfncvt_mask_rod.f.f.w_nxv4bf16_nxv4f32(<vscale x 4 x bfloat> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4bf16_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfncvt.rod.f.f.w v8, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x float>,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfncvt_rod.f.f.w_nxv8bf16_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8bf16_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfncvt.rod.f.f.w v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x float> %0,
+ iXLen %1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfncvt_mask_rod.f.f.w_nxv8bf16_nxv8f32(<vscale x 8 x bfloat> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8bf16_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfncvt.rod.f.f.w v8, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x float>,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfncvt_rod.f.f.w_nxv16bf16_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv16bf16_nxv16f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfncvt.rod.f.f.w v16, v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x float> %0,
+ iXLen %1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x float>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfncvt_mask_rod.f.f.w_nxv16bf16_nxv16f32(<vscale x 16 x bfloat> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv16bf16_nxv16f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfncvt.rod.f.f.w v8, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x float> %1,
+ <vscale x 16 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll
new file mode 100644
index 0000000..ee9e3d1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll
@@ -0,0 +1,270 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8> poison,
+ <vscale x 1 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1bf16(<vscale x 1 x i8> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, mu
+; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8> poison,
+ <vscale x 2 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2bf16(<vscale x 2 x i8> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, mu
+; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8> poison,
+ <vscale x 4 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4bf16(<vscale x 4 x i8> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, mu
+; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8> poison,
+ <vscale x 8 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8bf16(<vscale x 8 x i8> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, mu
+; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8> poison,
+ <vscale x 16 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16bf16(<vscale x 16 x i8> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, mu
+; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x bfloat>,
+ iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8> poison,
+ <vscale x 32 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32bf16(<vscale x 32 x i8> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, mu
+; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 32 x i8> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll
new file mode 100644
index 0000000..521f727
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll
@@ -0,0 +1,270 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8> poison,
+ <vscale x 1 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1bf16(<vscale x 1 x i8> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, mu
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8> poison,
+ <vscale x 2 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2bf16(<vscale x 2 x i8> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, mu
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8> poison,
+ <vscale x 4 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4bf16(<vscale x 4 x i8> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, mu
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8> poison,
+ <vscale x 8 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8bf16(<vscale x 8 x i8> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, mu
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8> poison,
+ <vscale x 16 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16bf16(<vscale x 16 x i8> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, mu
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x bfloat>,
+ iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8> poison,
+ <vscale x 32 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32bf16(<vscale x 32 x i8> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, mu
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 32 x i8> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll
new file mode 100644
index 0000000..ab9ebad
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll
@@ -0,0 +1,288 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, ma
+; CHECK-NEXT: vfncvt.x.f.w v9, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8> poison,
+ <vscale x 1 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1bf16(<vscale x 1 x i8> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, mu
+; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_x.f.w_nxv2i8_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i8_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, ma
+; CHECK-NEXT: vfncvt.x.f.w v9, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8> poison,
+ <vscale x 2 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2bf16(<vscale x 2 x i8> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, mu
+; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_x.f.w_nxv4i8_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i8_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, ma
+; CHECK-NEXT: vfncvt.x.f.w v9, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8> poison,
+ <vscale x 4 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4bf16(<vscale x 4 x i8> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, mu
+; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_x.f.w_nxv8i8_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i8_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, ma
+; CHECK-NEXT: vfncvt.x.f.w v10, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8> poison,
+ <vscale x 8 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8bf16(<vscale x 8 x i8> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, mu
+; CHECK-NEXT: vfncvt.x.f.w v8, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_x.f.w_nxv16i8_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i8_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, ma
+; CHECK-NEXT: vfncvt.x.f.w v12, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8> poison,
+ <vscale x 16 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16bf16(<vscale x 16 x i8> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, mu
+; CHECK-NEXT: vfncvt.x.f.w v8, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vfncvt_x.f.w_nxv32i8_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv32i8_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, ma
+; CHECK-NEXT: vfncvt.x.f.w v16, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8> poison,
+ <vscale x 32 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32bf16(<vscale x 32 x i8> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, mu
+; CHECK-NEXT: vfncvt.x.f.w v8, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 32 x i8> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll
new file mode 100644
index 0000000..61c6803
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll
@@ -0,0 +1,288 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, ma
+; CHECK-NEXT: vfncvt.xu.f.w v9, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8> poison,
+ <vscale x 1 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1bf16(<vscale x 1 x i8> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, mu
+; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, ma
+; CHECK-NEXT: vfncvt.xu.f.w v9, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8> poison,
+ <vscale x 2 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2bf16(<vscale x 2 x i8> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, mu
+; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, ma
+; CHECK-NEXT: vfncvt.xu.f.w v9, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8> poison,
+ <vscale x 4 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4bf16(<vscale x 4 x i8> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, mu
+; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, ma
+; CHECK-NEXT: vfncvt.xu.f.w v10, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8> poison,
+ <vscale x 8 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8bf16(<vscale x 8 x i8> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, mu
+; CHECK-NEXT: vfncvt.xu.f.w v8, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, ma
+; CHECK-NEXT: vfncvt.xu.f.w v12, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8> poison,
+ <vscale x 16 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16bf16(<vscale x 16 x i8> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, mu
+; CHECK-NEXT: vfncvt.xu.f.w v8, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, ma
+; CHECK-NEXT: vfncvt.xu.f.w v16, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8> poison,
+ <vscale x 32 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32bf16(<vscale x 32 x i8> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, mu
+; CHECK-NEXT: vfncvt.xu.f.w v8, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 32 x i8> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll
new file mode 100644
index 0000000..4b4091b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll
@@ -0,0 +1,553 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfnmacc.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmacc_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfnmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmacc_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfnmacc.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmacc_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfnmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmacc_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfnmacc.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmacc_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfnmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmacc_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfnmacc.vv v8, v10, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmacc_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfnmacc.vv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmacc_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfnmacc.vv v8, v12, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmacc_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfnmacc.vv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmacc_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfnmacc.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmacc_mask_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmacc_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfnmacc.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmacc_mask_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmacc_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfnmacc.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmacc_mask_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmacc_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfnmacc.vf v8, fa0, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmacc_mask_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfnmacc.vf v8, fa0, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmacc_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfnmacc.vf v8, fa0, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmacc_mask_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfnmacc.vf v8, fa0, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfnmadd.vv v8, v10, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %2,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfnmadd.vv v8, v10, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmacc_vf_nxv1bf16_bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1bf16_bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfnmadd.vf v8, fa0, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %2,
+ bfloat %1,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll
new file mode 100644
index 0000000..2bb6bf5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll
@@ -0,0 +1,553 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfnmadd.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmadd_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfnmadd.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmadd_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfnmadd.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmadd_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfnmadd.vv v8, v10, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfnmadd.vv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmadd_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfnmadd.vv v8, v12, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfnmadd.vv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmadd_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfnmadd.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmadd_mask_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmadd_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfnmadd.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmadd_mask_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmadd_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfnmadd.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmadd_mask_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmadd_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfnmadd.vf v8, fa0, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmadd_mask_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmadd_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfnmadd.vf v8, fa0, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmadd_mask_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfnmadd.vv v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %2,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfnmacc.vv v8, v10, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmadd_vf_nxv1bf16_bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1bf16_bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfnmacc.vf v8, fa0, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %2,
+ bfloat %1,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll
new file mode 100644
index 0000000..cfbaafa
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll
@@ -0,0 +1,553 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfnmsac.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsac_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfnmsac.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmsac_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfnmsac.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmsac_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfnmsac.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmsac_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfnmsac.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmsac_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfnmsac.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmsac_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfnmsac.vv v8, v10, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmsac_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfnmsac.vv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmsac_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfnmsac.vv v8, v12, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmsac_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfnmsac.vv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsac_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfnmsac.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsac_mask_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmsac_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfnmsac.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmsac_mask_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmsac_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfnmsac.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmsac_mask_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmsac_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfnmsac.vf v8, fa0, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmsac_mask_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfnmsac.vf v8, fa0, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmsac_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfnmsac.vf v8, fa0, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmsac_mask_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfnmsac.vf v8, fa0, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfnmsub.vv v8, v10, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %2,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfnmsub.vv v8, v10, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsac_vf_nxv1bf16_bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1bf16_bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfnmsub.vf v8, fa0, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %2,
+ bfloat %1,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll
new file mode 100644
index 0000000..5ebbb90c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll
@@ -0,0 +1,553 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfnmsub.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfnmsub.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmsub_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfnmsub.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfnmsub.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmsub_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfnmsub.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfnmsub.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmsub_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfnmsub.vv v8, v10, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfnmsub.vv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmsub_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfnmsub.vv v8, v12, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfnmsub.vv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsub_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfnmsub.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsub_mask_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmsub_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfnmsub.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmsub_mask_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmsub_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfnmsub.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmsub_mask_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmsub_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfnmsub.vf v8, fa0, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmsub_mask_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmsub_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfnmsub.vf v8, fa0, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmsub_mask_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfnmsub.vf v8, fa0, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfnmsub.vv v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %2,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfnmsac.vv v8, v10, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsub_vf_nxv1bf16_bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1bf16_bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfnmsac.vf v8, fa0, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %2,
+ bfloat %1,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll
new file mode 100644
index 0000000..1211415
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll
@@ -0,0 +1,282 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfrec7.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfrec7_v_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_v_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfrec7.v v8, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfrec7_mask_v_nxv1bf16_nxv1bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfrec7.v v8, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %0,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfrec7.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfrec7_v_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_v_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfrec7.v v8, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.nxv2bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfrec7_mask_v_nxv2bf16_nxv2bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfrec7.v v8, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16(
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %0,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfrec7.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfrec7_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_v_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfrec7.v v8, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.nxv4bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfrec7_mask_v_nxv4bf16_nxv4bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfrec7.v v8, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16(
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %0,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfrec7.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfrec7_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_v_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfrec7.v v8, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.nxv8bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfrec7_mask_v_nxv8bf16_nxv8bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfrec7.v v8, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16(
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %0,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfrec7.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfrec7_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_v_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfrec7.v v8, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.nxv16bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfrec7_mask_v_nxv16bf16_nxv16bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfrec7.v v8, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16(
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %0,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfrec7.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfrec7_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_v_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfrec7.v v8, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.nxv32bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfrec7_mask_v_nxv32bf16_nxv32bf16(<vscale x 32 x i1> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT: vfrec7.v v8, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16(
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x bfloat> %2,
+ <vscale x 32 x i1> %0,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll
new file mode 100644
index 0000000..4626b86
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll
@@ -0,0 +1,264 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfrsqrt7_v_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfrsqrt7.v v8, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfrsqrt7_mask_v_nxv1bf16_nxv1bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %0,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfrsqrt7_v_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfrsqrt7.v v8, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.nxv2bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfrsqrt7_mask_v_nxv2bf16_nxv2bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16(
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %0,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfrsqrt7_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfrsqrt7.v v8, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.nxv4bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfrsqrt7_mask_v_nxv4bf16_nxv4bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16(
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %0,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfrsqrt7_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfrsqrt7.v v8, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.nxv8bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfrsqrt7_mask_v_nxv8bf16_nxv8bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfrsqrt7.v v8, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16(
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %0,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfrsqrt7_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfrsqrt7.v v8, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.nxv16bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfrsqrt7_mask_v_nxv16bf16_nxv16bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfrsqrt7.v v8, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16(
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %0,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfrsqrt7_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfrsqrt7.v v8, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.nxv32bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfrsqrt7_mask_v_nxv32bf16_nxv32bf16(<vscale x 32 x i1> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT: vfrsqrt7.v v8, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16(
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x bfloat> %2,
+ <vscale x 32 x i1> %0,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll
new file mode 100644
index 0000000..54a6d48
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll
@@ -0,0 +1,282 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfrsub.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfrsub_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfrsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfrsub_mask_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfrsub.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfrsub_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfrsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfrsub_mask_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfrsub.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfrsub_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfrsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfrsub_mask_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfrsub.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfrsub_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfrsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfrsub_mask_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfrsub.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfrsub.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfrsub_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfrsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfrsub_mask_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfrsub.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfrsub.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfrsub_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfrsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ bfloat %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ <vscale x 32 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfrsub_mask_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, bfloat %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT: vfrsub.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ bfloat %2,
+ <vscale x 32 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll
new file mode 100644
index 0000000..2cd698d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll
@@ -0,0 +1,571 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsgnj_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfsgnj.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsgnj_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsgnj_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfsgnj.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsgnj_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsgnj_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfsgnj.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsgnj_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsgnj_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfsgnj.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsgnj_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v10, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsgnj_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfsgnj.vv v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsgnj_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v12, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsgnj_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfsgnj.vv v8, v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsgnj_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x bfloat> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x bfloat> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsgnj_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsgnj_mask_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsgnj_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsgnj_mask_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsgnj_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsgnj_mask_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsgnj_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsgnj_mask_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfsgnj.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsgnj_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsgnj_mask_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfsgnj.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsgnj_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsgnj_mask_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, bfloat %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT: vfsgnj.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ bfloat %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll
new file mode 100644
index 0000000..08340be
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll
@@ -0,0 +1,571 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsgnjn_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsgnjn_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsgnjn_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsgnjn_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsgnjn_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsgnjn_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsgnjn_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsgnjn_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfsgnjn.vv v8, v10, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsgnjn_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsgnjn_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfsgnjn.vv v8, v12, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsgnjn_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsgnjn_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x bfloat> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu
+; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x bfloat> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsgnjn_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsgnjn_mask_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsgnjn_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsgnjn_mask_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsgnjn_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsgnjn_mask_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsgnjn_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsgnjn_mask_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfsgnjn.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsgnjn_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsgnjn_mask_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfsgnjn.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsgnjn_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsgnjn_mask_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, bfloat %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT: vfsgnjn.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ bfloat %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll
new file mode 100644
index 0000000..e51a42e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll
@@ -0,0 +1,571 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsgnjx_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfsgnjx.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsgnjx_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsgnjx_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfsgnjx.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsgnjx_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsgnjx_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfsgnjx.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsgnjx_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsgnjx_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfsgnjx.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsgnjx_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfsgnjx.vv v8, v10, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsgnjx_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfsgnjx.vv v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsgnjx_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfsgnjx.vv v8, v12, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsgnjx_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfsgnjx.vv v8, v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsgnjx_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x bfloat> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu
+; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x bfloat> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsgnjx_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsgnjx_mask_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsgnjx_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsgnjx_mask_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsgnjx_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsgnjx_mask_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsgnjx_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsgnjx_mask_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfsgnjx.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsgnjx_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsgnjx_mask_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfsgnjx.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsgnjx_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsgnjx_mask_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, bfloat %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT: vfsgnjx.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ bfloat %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-bf.ll
new file mode 100644
index 0000000..c65719c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-bf.ll
@@ -0,0 +1,288 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfslide1down_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfslide1down_mask_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfslide1down_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfslide1down_mask_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfslide1down_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfslide1down_mask_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfslide1down_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfslide1down_mask_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfslide1down.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfslide1down_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfslide1down_mask_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfslide1down.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfslide1down_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfslide1down_mask_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, bfloat %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT: vfslide1down.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ bfloat %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll
new file mode 100644
index 0000000..57a4898
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll
@@ -0,0 +1,294 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfslide1up_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfslide1up.vf v9, v8, fa0
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfslide1up_mask_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfslide1up_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfslide1up.vf v9, v8, fa0
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfslide1up_mask_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfslide1up_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfslide1up.vf v9, v8, fa0
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfslide1up_mask_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfslide1up_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfslide1up.vf v10, v8, fa0
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfslide1up_mask_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfslide1up.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfslide1up_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfslide1up.vf v12, v8, fa0
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfslide1up_mask_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfslide1up.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfslide1up_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfslide1up.vf v16, v8, fa0
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfslide1up_mask_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, bfloat %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT: vfslide1up.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ bfloat %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll
new file mode 100644
index 0000000..aea7521
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll
@@ -0,0 +1,559 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsub_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsub_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsub_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsub_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v10, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsub_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v12, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsub_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsub_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x bfloat> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x bfloat> %2,
+ <vscale x 32 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsub_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsub_mask_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsub_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsub_mask_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsub_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsub_mask_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsub_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsub_mask_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfsub.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsub_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsub_mask_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfsub.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsub_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ bfloat %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ <vscale x 32 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsub_mask_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, bfloat %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT: vfsub.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ bfloat %2,
+ <vscale x 32 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll
new file mode 100644
index 0000000..62feac8
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll
@@ -0,0 +1,519 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwadd_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfwadd.vv v10, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16(
+ <vscale x 1 x float> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwadd_vv_nxv2f32_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f32_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfwadd.vv v10, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16(
+ <vscale x 2 x float> poison,
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2bf16_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f32_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwadd_vv_nxv4f32_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f32_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwadd.vv v8, v11, v10
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16(
+ <vscale x 4 x float> poison,
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4bf16_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f32_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwadd.vv v8, v10, v11, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwadd_vv_nxv8f32_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f32_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwadd.vv v8, v14, v12
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16(
+ <vscale x 8 x float> poison,
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8bf16_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f32_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwadd.vv v8, v12, v14, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwadd_vv_nxv16f32_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv16f32_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv4r.v v20, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwadd.vv v8, v20, v16
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16(
+ <vscale x 16 x float> poison,
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwadd_mask_vv_nxv16f32_nxv16bf16_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f32_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwadd.vv v8, v16, v20, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwadd_vf_nxv1f32_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f32_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfwadd.vf v9, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16(
+ <vscale x 1 x float> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwadd_mask_vf_nxv1f32_nxv1bf16_bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwadd.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwadd_vf_nxv2f32_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f32_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfwadd.vf v9, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16(
+ <vscale x 2 x float> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwadd_mask_vf_nxv2f32_nxv2bf16_bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwadd.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwadd_vf_nxv4f32_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f32_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwadd.vf v8, v10, fa0
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16(
+ <vscale x 4 x float> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwadd_mask_vf_nxv4f32_nxv4bf16_bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwadd.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwadd_vf_nxv8f32_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f32_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwadd.vf v8, v12, fa0
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16(
+ <vscale x 8 x float> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwadd_mask_vf_nxv8f32_nxv8bf16_bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwadd.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwadd_vf_nxv16f32_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f32_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwadd.vf v8, v16, fa0
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16(
+ <vscale x 16 x float> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwadd_mask_vf_nxv16f32_nxv16bf16_bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f32_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwadd.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll
new file mode 100644
index 0000000..c5417e8
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll
@@ -0,0 +1,773 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfwadd.wv v8, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> poison,
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwadd.wv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfwadd.wv v8, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> poison,
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwadd.wv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfwadd.wv v8, v8, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> poison,
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwadd.wv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfwadd.wv v8, v8, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> poison,
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwadd.wv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfwadd.wv v8, v8, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16(
+ <vscale x 16 x float> poison,
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl4re16.v v24, (a0)
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, a1, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwadd.wv v8, v16, v24, v0.t
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_bf16(<vscale x 1 x float> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfwadd.wf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16(
+ <vscale x 1 x float> poison,
+ <vscale x 1 x float> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_bf16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwadd.wf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_bf16(<vscale x 2 x float> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfwadd.wf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16(
+ <vscale x 2 x float> poison,
+ <vscale x 2 x float> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_bf16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwadd.wf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_bf16(<vscale x 4 x float> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfwadd.wf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16(
+ <vscale x 4 x float> poison,
+ <vscale x 4 x float> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_bf16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwadd.wf v8, v10, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_bf16(<vscale x 8 x float> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfwadd.wf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16(
+ <vscale x 8 x float> poison,
+ <vscale x 8 x float> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_bf16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwadd.wf v8, v12, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_bf16(<vscale x 16 x float> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfwadd.wf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16(
+ <vscale x 16 x float> poison,
+ <vscale x 16 x float> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_bf16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwadd.wf v8, v16, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwadd.wv v8, v8, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwadd.wv v8, v8, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwadd.wv v8, v8, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwadd.wv v8, v8, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwadd.wv v8, v8, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_bf16(<vscale x 1 x float> %0, bfloat %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %0,
+ bfloat %1,
+ <vscale x 1 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_bf16(<vscale x 2 x float> %0, bfloat %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %0,
+ bfloat %1,
+ <vscale x 2 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_bf16(<vscale x 4 x float> %0, bfloat %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %0,
+ bfloat %1,
+ <vscale x 4 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_bf16(<vscale x 8 x float> %0, bfloat %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %0,
+ bfloat %1,
+ <vscale x 8 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_bf16(<vscale x 16 x float> %0, bfloat %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float> %0,
+ bfloat %1,
+ <vscale x 16 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfwadd.wv v10, v9, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> poison,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x bfloat> %0,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfwadd.wv v10, v9, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> poison,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x bfloat> %0,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwadd.wv v8, v10, v12
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> poison,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x bfloat> %0,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwadd.wv v8, v12, v16
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> poison,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x bfloat> %0,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll
new file mode 100644
index 0000000..b7df45b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll
@@ -0,0 +1,264 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfwcvt_f.x.v_nxv1bf16_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1bf16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x i8> %0,
+ iXLen %1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfwcvt_mask_f.x.v_nxv1bf16_nxv1i8(<vscale x 1 x bfloat> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1bf16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, mu
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfwcvt_f.x.v_nxv2bf16_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2bf16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x i8> %0,
+ iXLen %1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfwcvt_mask_f.x.v_nxv2bf16_nxv2i8(<vscale x 2 x bfloat> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2bf16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, mu
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfwcvt_f.x.v_nxv4bf16_nxv4i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4bf16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x i8> %0,
+ iXLen %1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfwcvt_mask_f.x.v_nxv4bf16_nxv4i8(<vscale x 4 x bfloat> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4bf16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, mu
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfwcvt_f.x.v_nxv8bf16_nxv8i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8bf16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x i8> %0,
+ iXLen %1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfwcvt_mask_f.x.v_nxv8bf16_nxv8i8(<vscale x 8 x bfloat> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8bf16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, mu
+; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i8>,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfwcvt_f.x.v_nxv16bf16_nxv16i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16bf16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x i8> %0,
+ iXLen %1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfwcvt_mask_f.x.v_nxv16bf16_nxv16i8(<vscale x 16 x bfloat> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16bf16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, mu
+; CHECK-NEXT: vfwcvt.f.x.v v8, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i8>,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfwcvt_f.x.v_nxv32bf16_nxv32i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv32bf16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x i8> %0,
+ iXLen %1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfwcvt_mask_f.x.v_nxv32bf16_nxv32i8(<vscale x 32 x bfloat> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv32bf16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, mu
+; CHECK-NEXT: vfwcvt.f.x.v v8, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll
new file mode 100644
index 0000000..c370261
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll
@@ -0,0 +1,264 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfwcvt_f.xu.v_nxv1bf16_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1bf16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x i8> %0,
+ iXLen %1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfwcvt_mask_f.xu.v_nxv1bf16_nxv1i8(<vscale x 1 x bfloat> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1bf16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, mu
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfwcvt_f.xu.v_nxv2bf16_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2bf16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x i8> %0,
+ iXLen %1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfwcvt_mask_f.xu.v_nxv2bf16_nxv2i8(<vscale x 2 x bfloat> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2bf16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, mu
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfwcvt_f.xu.v_nxv4bf16_nxv4i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4bf16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x i8> %0,
+ iXLen %1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfwcvt_mask_f.xu.v_nxv4bf16_nxv4i8(<vscale x 4 x bfloat> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4bf16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, mu
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfwcvt_f.xu.v_nxv8bf16_nxv8i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8bf16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x i8> %0,
+ iXLen %1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfwcvt_mask_f.xu.v_nxv8bf16_nxv8i8(<vscale x 8 x bfloat> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8bf16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, mu
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i8>,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfwcvt_f.xu.v_nxv16bf16_nxv16i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16bf16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x i8> %0,
+ iXLen %1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfwcvt_mask_f.xu.v_nxv16bf16_nxv16i8(<vscale x 16 x bfloat> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16bf16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, mu
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i8>,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfwcvt_f.xu.v_nxv32bf16_nxv32i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv32bf16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x i8> %0,
+ iXLen %1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfwcvt_mask_f.xu.v_nxv32bf16_nxv32i8(<vscale x 32 x bfloat> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv32bf16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, mu
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-bf.ll
new file mode 100644
index 0000000..a3f6678
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-bf.ll
@@ -0,0 +1,506 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwmsac_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfwmsac.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfwmsac.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwmsac_vv_nxv2f32_nxv2bf16_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f32_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfwmsac.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2bf16_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfwmsac.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwmsac_vv_nxv4f32_nxv4bf16_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f32_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfwmsac.vv v8, v10, v11
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4bf16_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfwmsac.vv v8, v10, v11, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwmsac_vv_nxv8f32_nxv8bf16_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f32_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfwmsac.vv v8, v12, v14
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8bf16_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfwmsac.vv v8, v12, v14, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwmsac_vv_nxv16f32_nxv16bf16_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv16f32_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfwmsac.vv v8, v16, v20
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16bf16_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfwmsac.vv v8, v16, v20, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16(
+ <vscale x 1 x float>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwmsac_vf_nxv1f32_bf16_nxv1bf16(<vscale x 1 x float> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f32_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfwmsac.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16(
+ <vscale x 1 x float> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16(
+ <vscale x 1 x float>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwmsac_mask_vf_nxv1f32_bf16_nxv1bf16(<vscale x 1 x float> %0, bfloat %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f32_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfwmsac.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16(
+ <vscale x 1 x float> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16(
+ <vscale x 2 x float>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwmsac_vf_nxv2f32_bf16_nxv2bf16(<vscale x 2 x float> %0, bfloat %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f32_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfwmsac.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16(
+ <vscale x 2 x float> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16(
+ <vscale x 2 x float>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwmsac_mask_vf_nxv2f32_bf16_nxv2bf16(<vscale x 2 x float> %0, bfloat %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f32_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfwmsac.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16(
+ <vscale x 2 x float> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16(
+ <vscale x 4 x float>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwmsac_vf_nxv4f32_bf16_nxv4bf16(<vscale x 4 x float> %0, bfloat %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f32_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfwmsac.vf v8, fa0, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16(
+ <vscale x 4 x float> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16(
+ <vscale x 4 x float>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwmsac_mask_vf_nxv4f32_bf16_nxv4bf16(<vscale x 4 x float> %0, bfloat %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f32_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfwmsac.vf v8, fa0, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16(
+ <vscale x 4 x float> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16(
+ <vscale x 8 x float>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwmsac_vf_nxv8f32_bf16_nxv8bf16(<vscale x 8 x float> %0, bfloat %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f32_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfwmsac.vf v8, fa0, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16(
+ <vscale x 8 x float> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16(
+ <vscale x 8 x float>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwmsac_mask_vf_nxv8f32_bf16_nxv8bf16(<vscale x 8 x float> %0, bfloat %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f32_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfwmsac.vf v8, fa0, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16(
+ <vscale x 8 x float> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16(
+ <vscale x 16 x float>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwmsac_vf_nxv16f32_bf16_nxv16bf16(<vscale x 16 x float> %0, bfloat %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv16f32_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfwmsac.vf v8, fa0, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16(
+ <vscale x 16 x float> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16(
+ <vscale x 16 x float>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwmsac_mask_vf_nxv16f32_bf16_nxv16bf16(<vscale x 16 x float> %0, bfloat %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv16f32_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfwmsac.vf v8, fa0, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16(
+ <vscale x 16 x float> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 16 x float> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll
new file mode 100644
index 0000000..577b93a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll
@@ -0,0 +1,519 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwmul_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfwmul.vv v10, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16(
+ <vscale x 1 x float> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwmul.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwmul_vv_nxv2f32_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f32_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfwmul.vv v10, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16(
+ <vscale x 2 x float> poison,
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2bf16_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f32_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwmul.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwmul_vv_nxv4f32_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f32_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwmul.vv v8, v11, v10
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16(
+ <vscale x 4 x float> poison,
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4bf16_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f32_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwmul.vv v8, v10, v11, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwmul_vv_nxv8f32_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f32_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwmul.vv v8, v14, v12
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16(
+ <vscale x 8 x float> poison,
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8bf16_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f32_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwmul.vv v8, v12, v14, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwmul_vv_nxv16f32_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv16f32_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv4r.v v20, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwmul.vv v8, v20, v16
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16(
+ <vscale x 16 x float> poison,
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwmul_mask_vv_nxv16f32_nxv16bf16_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f32_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwmul.vv v8, v16, v20, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwmul_vf_nxv1f32_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f32_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfwmul.vf v9, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16(
+ <vscale x 1 x float> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1bf16_bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwmul.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwmul_vf_nxv2f32_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f32_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfwmul.vf v9, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16(
+ <vscale x 2 x float> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2bf16_bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f32_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwmul.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwmul_vf_nxv4f32_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f32_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwmul.vf v8, v10, fa0
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16(
+ <vscale x 4 x float> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwmul_mask_vf_nxv4f32_nxv4bf16_bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwmul.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwmul_vf_nxv8f32_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f32_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwmul.vf v8, v12, fa0
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16(
+ <vscale x 8 x float> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8bf16_bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f32_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwmul.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwmul_vf_nxv16f32_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f32_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwmul.vf v8, v16, fa0
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16(
+ <vscale x 16 x float> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16bf16_bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f32_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwmul.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-bf.ll
new file mode 100644
index 0000000..1e05e4c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-bf.ll
@@ -0,0 +1,506 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwnmacc_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfwnmacc.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfwnmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwnmacc_vv_nxv2f32_nxv2bf16_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f32_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfwnmacc.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2bf16_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfwnmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwnmacc_vv_nxv4f32_nxv4bf16_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f32_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfwnmacc.vv v8, v10, v11
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4bf16_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfwnmacc.vv v8, v10, v11, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwnmacc_vv_nxv8f32_nxv8bf16_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f32_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfwnmacc.vv v8, v12, v14
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8bf16_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfwnmacc.vv v8, v12, v14, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwnmacc_vv_nxv16f32_nxv16bf16_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv16f32_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfwnmacc.vv v8, v16, v20
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16bf16_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfwnmacc.vv v8, v16, v20, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16(
+ <vscale x 1 x float>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwnmacc_vf_nxv1f32_bf16_nxv1bf16(<vscale x 1 x float> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f32_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16(
+ <vscale x 1 x float> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16(
+ <vscale x 1 x float>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwnmacc_mask_vf_nxv1f32_bf16_nxv1bf16(<vscale x 1 x float> %0, bfloat %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f32_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16(
+ <vscale x 1 x float> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16(
+ <vscale x 2 x float>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwnmacc_vf_nxv2f32_bf16_nxv2bf16(<vscale x 2 x float> %0, bfloat %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f32_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16(
+ <vscale x 2 x float> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16(
+ <vscale x 2 x float>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwnmacc_mask_vf_nxv2f32_bf16_nxv2bf16(<vscale x 2 x float> %0, bfloat %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f32_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16(
+ <vscale x 2 x float> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16(
+ <vscale x 4 x float>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwnmacc_vf_nxv4f32_bf16_nxv4bf16(<vscale x 4 x float> %0, bfloat %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f32_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16(
+ <vscale x 4 x float> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16(
+ <vscale x 4 x float>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwnmacc_mask_vf_nxv4f32_bf16_nxv4bf16(<vscale x 4 x float> %0, bfloat %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f32_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16(
+ <vscale x 4 x float> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16(
+ <vscale x 8 x float>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwnmacc_vf_nxv8f32_bf16_nxv8bf16(<vscale x 8 x float> %0, bfloat %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f32_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16(
+ <vscale x 8 x float> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16(
+ <vscale x 8 x float>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwnmacc_mask_vf_nxv8f32_bf16_nxv8bf16(<vscale x 8 x float> %0, bfloat %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f32_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16(
+ <vscale x 8 x float> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16(
+ <vscale x 16 x float>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwnmacc_vf_nxv16f32_bf16_nxv16bf16(<vscale x 16 x float> %0, bfloat %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv16f32_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16(
+ <vscale x 16 x float> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16(
+ <vscale x 16 x float>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwnmacc_mask_vf_nxv16f32_bf16_nxv16bf16(<vscale x 16 x float> %0, bfloat %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv16f32_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16(
+ <vscale x 16 x float> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 16 x float> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-bf.ll
new file mode 100644
index 0000000..223ad4f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-bf.ll
@@ -0,0 +1,506 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwnmsac_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfwnmsac.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfwnmsac.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwnmsac_vv_nxv2f32_nxv2bf16_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f32_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfwnmsac.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2bf16_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfwnmsac.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwnmsac_vv_nxv4f32_nxv4bf16_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f32_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfwnmsac.vv v8, v10, v11
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4bf16_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfwnmsac.vv v8, v10, v11, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwnmsac_vv_nxv8f32_nxv8bf16_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f32_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfwnmsac.vv v8, v12, v14
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8bf16_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfwnmsac.vv v8, v12, v14, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwnmsac_vv_nxv16f32_nxv16bf16_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv16f32_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfwnmsac.vv v8, v16, v20
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16bf16_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfwnmsac.vv v8, v16, v20, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16(
+ <vscale x 1 x float>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwnmsac_vf_nxv1f32_bf16_nxv1bf16(<vscale x 1 x float> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f32_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16(
+ <vscale x 1 x float> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16(
+ <vscale x 1 x float>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwnmsac_mask_vf_nxv1f32_bf16_nxv1bf16(<vscale x 1 x float> %0, bfloat %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f32_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16(
+ <vscale x 1 x float> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16(
+ <vscale x 2 x float>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwnmsac_vf_nxv2f32_bf16_nxv2bf16(<vscale x 2 x float> %0, bfloat %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f32_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16(
+ <vscale x 2 x float> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16(
+ <vscale x 2 x float>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwnmsac_mask_vf_nxv2f32_bf16_nxv2bf16(<vscale x 2 x float> %0, bfloat %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f32_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16(
+ <vscale x 2 x float> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16(
+ <vscale x 4 x float>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwnmsac_vf_nxv4f32_bf16_nxv4bf16(<vscale x 4 x float> %0, bfloat %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f32_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16(
+ <vscale x 4 x float> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16(
+ <vscale x 4 x float>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwnmsac_mask_vf_nxv4f32_bf16_nxv4bf16(<vscale x 4 x float> %0, bfloat %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f32_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16(
+ <vscale x 4 x float> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16(
+ <vscale x 8 x float>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwnmsac_vf_nxv8f32_bf16_nxv8bf16(<vscale x 8 x float> %0, bfloat %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f32_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16(
+ <vscale x 8 x float> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16(
+ <vscale x 8 x float>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwnmsac_mask_vf_nxv8f32_bf16_nxv8bf16(<vscale x 8 x float> %0, bfloat %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f32_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16(
+ <vscale x 8 x float> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16(
+ <vscale x 16 x float>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwnmsac_vf_nxv16f32_bf16_nxv16bf16(<vscale x 16 x float> %0, bfloat %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv16f32_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16(
+ <vscale x 16 x float> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16(
+ <vscale x 16 x float>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwnmsac_mask_vf_nxv16f32_bf16_nxv16bf16(<vscale x 16 x float> %0, bfloat %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv16f32_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16(
+ <vscale x 16 x float> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 16 x float> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll
new file mode 100644
index 0000000..d993e4e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll
@@ -0,0 +1,519 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwsub_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfwsub.vv v10, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16(
+ <vscale x 1 x float> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f32_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwsub.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwsub_vv_nxv2f32_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f32_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfwsub.vv v10, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16(
+ <vscale x 2 x float> poison,
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2bf16_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f32_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwsub.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwsub_vv_nxv4f32_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f32_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwsub.vv v8, v11, v10
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16(
+ <vscale x 4 x float> poison,
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4bf16_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f32_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwsub.vv v8, v10, v11, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwsub_vv_nxv8f32_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f32_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwsub.vv v8, v14, v12
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16(
+ <vscale x 8 x float> poison,
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8bf16_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f32_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwsub.vv v8, v12, v14, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwsub_vv_nxv16f32_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv16f32_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv4r.v v20, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwsub.vv v8, v20, v16
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16(
+ <vscale x 16 x float> poison,
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwsub_mask_vv_nxv16f32_nxv16bf16_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f32_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwsub.vv v8, v16, v20, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwsub_vf_nxv1f32_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f32_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfwsub.vf v9, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16(
+ <vscale x 1 x float> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwsub_mask_vf_nxv1f32_nxv1bf16_bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwsub.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwsub_vf_nxv2f32_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f32_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfwsub.vf v9, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16(
+ <vscale x 2 x float> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwsub_mask_vf_nxv2f32_nxv2bf16_bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwsub.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwsub_vf_nxv4f32_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f32_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwsub.vf v8, v10, fa0
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16(
+ <vscale x 4 x float> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwsub_mask_vf_nxv4f32_nxv4bf16_bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwsub.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwsub_vf_nxv8f32_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f32_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwsub.vf v8, v12, fa0
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16(
+ <vscale x 8 x float> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwsub_mask_vf_nxv8f32_nxv8bf16_bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwsub.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwsub_vf_nxv16f32_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f32_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwsub.vf v8, v16, fa0
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16(
+ <vscale x 16 x float> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwsub_mask_vf_nxv16f32_nxv16bf16_bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f32_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwsub.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll
new file mode 100644
index 0000000..b22899a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll
@@ -0,0 +1,773 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfwsub.wv v8, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> poison,
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwsub.wv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfwsub.wv v8, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> poison,
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwsub.wv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfwsub.wv v8, v8, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> poison,
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwsub.wv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfwsub.wv v8, v8, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> poison,
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwsub.wv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfwsub.wv v8, v8, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16(
+ <vscale x 16 x float> poison,
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl4re16.v v24, (a0)
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, a1, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwsub.wv v8, v16, v24, v0.t
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_bf16(<vscale x 1 x float> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16(
+ <vscale x 1 x float> poison,
+ <vscale x 1 x float> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_bf16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_bf16(<vscale x 2 x float> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16(
+ <vscale x 2 x float> poison,
+ <vscale x 2 x float> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_bf16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_bf16(<vscale x 4 x float> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16(
+ <vscale x 4 x float> poison,
+ <vscale x 4 x float> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_bf16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v10, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_bf16(<vscale x 8 x float> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16(
+ <vscale x 8 x float> poison,
+ <vscale x 8 x float> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_bf16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v12, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_bf16(<vscale x 16 x float> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16(
+ <vscale x 16 x float> poison,
+ <vscale x 16 x float> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_bf16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v16, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwsub.wv v8, v8, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwsub.wv v8, v8, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwsub.wv v8, v8, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwsub.wv v8, v8, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwsub.wv v8, v8, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_bf16(<vscale x 1 x float> %0, bfloat %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %0,
+ bfloat %1,
+ <vscale x 1 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_bf16(<vscale x 2 x float> %0, bfloat %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %0,
+ bfloat %1,
+ <vscale x 2 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_bf16(<vscale x 4 x float> %0, bfloat %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %0,
+ bfloat %1,
+ <vscale x 4 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_bf16(<vscale x 8 x float> %0, bfloat %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %0,
+ bfloat %1,
+ <vscale x 8 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_bf16(<vscale x 16 x float> %0, bfloat %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float> %0,
+ bfloat %1,
+ <vscale x 16 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfwsub.wv v10, v9, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> poison,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x bfloat> %0,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfwsub.wv v10, v9, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> poison,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x bfloat> %0,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwsub.wv v8, v10, v12
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> poison,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x bfloat> %0,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwsub.wv v8, v12, v16
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> poison,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x bfloat> %0,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq-bf.ll
new file mode 100644
index 0000000..9bd859b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq-bf.ll
@@ -0,0 +1,496 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_vv_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vmfeq.vv v0, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1bf16_nxv1bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmfeq.vv v0, v8, v9
+; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %3,
+ <vscale x 1 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_vv_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vmfeq.vv v0, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2bf16_nxv2bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmfeq.vv v0, v8, v9
+; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2bf16(
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x bfloat> %3,
+ <vscale x 2 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_vv_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmfeq.vv v0, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4bf16_nxv4bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmfeq.vv v0, v8, v9
+; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t
+; CHECK-NEXT: vmv.v.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4bf16(
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x bfloat> %3,
+ <vscale x 4 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfeq_vv_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmfeq.vv v0, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vv_nxv8bf16_nxv8bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v14, v0
+; CHECK-NEXT: vmfeq.vv v0, v8, v10
+; CHECK-NEXT: vmfeq.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8bf16(
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x bfloat> %3,
+ <vscale x 8 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfeq_vv_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmfeq.vv v0, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vv_nxv16bf16_nxv16bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v20, v0
+; CHECK-NEXT: vmfeq.vv v0, v8, v12
+; CHECK-NEXT: vmfeq.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16bf16(
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x bfloat> %3,
+ <vscale x 16 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_vf_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vmfeq.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.bf16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1bf16_bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.bf16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_vf_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vmfeq.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.bf16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2bf16_bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.bf16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_vf_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmfeq.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.bf16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4bf16_bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv.v.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.bf16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfeq_vf_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmfeq.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.bf16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8bf16_bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmfeq.vf v11, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.bf16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfeq_vf_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmfeq.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.bf16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vf_nxv16bf16_bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v13, v0
+; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vmfeq.vf v13, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v13
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.bf16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge-bf.ll
new file mode 100644
index 0000000..73946dc
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfge-bf.ll
@@ -0,0 +1,496 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfge_vv_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vv_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vmfle.vv v0, v9, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1bf16_nxv1bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmfle.vv v0, v9, v8
+; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %3,
+ <vscale x 1 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfge_vv_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vv_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vmfle.vv v0, v9, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2bf16_nxv2bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmfle.vv v0, v9, v8
+; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2bf16(
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x bfloat> %3,
+ <vscale x 2 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfge_vv_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vv_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmfle.vv v0, v9, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4bf16_nxv4bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmfle.vv v0, v9, v8
+; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t
+; CHECK-NEXT: vmv.v.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4bf16(
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x bfloat> %3,
+ <vscale x 4 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfge_vv_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vv_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmfle.vv v0, v10, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfge_mask_vv_nxv8bf16_nxv8bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v14, v0
+; CHECK-NEXT: vmfle.vv v0, v10, v8
+; CHECK-NEXT: vmfle.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16(
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x bfloat> %3,
+ <vscale x 8 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfge_vv_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vv_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmfle.vv v0, v12, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfge_mask_vv_nxv16bf16_nxv16bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v20, v0
+; CHECK-NEXT: vmfle.vv v0, v12, v8
+; CHECK-NEXT: vmfle.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16(
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x bfloat> %3,
+ <vscale x 16 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfge_vf_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vmfge.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.bf16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1bf16_bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.bf16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfge_vf_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vmfge.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.bf16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2bf16_bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.bf16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfge_vf_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmfge.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.bf16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4bf16_bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv.v.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.bf16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfge_vf_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmfge.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.bf16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8bf16_bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmfge.vf v11, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.bf16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfge_vf_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmfge.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.bf16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfge_mask_vf_nxv16bf16_bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v13, v0
+; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vmfge.vf v13, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v13
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.bf16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt-bf.ll
new file mode 100644
index 0000000..fac324c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt-bf.ll
@@ -0,0 +1,496 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfgt_vv_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vv_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vmflt.vv v0, v9, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1bf16_nxv1bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmflt.vv v0, v9, v8
+; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %3,
+ <vscale x 1 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfgt_vv_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vv_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vmflt.vv v0, v9, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2bf16_nxv2bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmflt.vv v0, v9, v8
+; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16(
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x bfloat> %3,
+ <vscale x 2 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfgt_vv_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vv_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmflt.vv v0, v9, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4bf16_nxv4bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmflt.vv v0, v9, v8
+; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t
+; CHECK-NEXT: vmv.v.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16(
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x bfloat> %3,
+ <vscale x 4 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfgt_vv_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vv_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmflt.vv v0, v10, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vv_nxv8bf16_nxv8bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v14, v0
+; CHECK-NEXT: vmflt.vv v0, v10, v8
+; CHECK-NEXT: vmflt.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16(
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x bfloat> %3,
+ <vscale x 8 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfgt_vv_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vv_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmflt.vv v0, v12, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vv_nxv16bf16_nxv16bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v20, v0
+; CHECK-NEXT: vmflt.vv v0, v12, v8
+; CHECK-NEXT: vmflt.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16(
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x bfloat> %3,
+ <vscale x 16 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfgt_vf_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vmfgt.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.bf16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1bf16_bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.bf16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfgt_vf_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vmfgt.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.bf16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2bf16_bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.bf16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfgt_vf_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmfgt.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.bf16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4bf16_bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv.v.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.bf16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfgt_vf_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmfgt.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.bf16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8bf16_bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmfgt.vf v11, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.bf16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfgt_vf_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmfgt.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.bf16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vf_nxv16bf16_bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v13, v0
+; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vmfgt.vf v13, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v13
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.bf16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfle-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle-bf.ll
new file mode 100644
index 0000000..8356b7b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfle-bf.ll
@@ -0,0 +1,496 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_vv_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vmfle.vv v0, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1bf16_nxv1bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmfle.vv v0, v8, v9
+; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %3,
+ <vscale x 1 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_vv_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vmfle.vv v0, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2bf16_nxv2bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmfle.vv v0, v8, v9
+; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2bf16(
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x bfloat> %3,
+ <vscale x 2 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_vv_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmfle.vv v0, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4bf16_nxv4bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmfle.vv v0, v8, v9
+; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t
+; CHECK-NEXT: vmv.v.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16(
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x bfloat> %3,
+ <vscale x 4 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfle_vv_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmfle.vv v0, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8bf16_nxv8bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v14, v0
+; CHECK-NEXT: vmfle.vv v0, v8, v10
+; CHECK-NEXT: vmfle.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16(
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x bfloat> %3,
+ <vscale x 8 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfle_vv_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmfle.vv v0, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfle_mask_vv_nxv16bf16_nxv16bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v20, v0
+; CHECK-NEXT: vmfle.vv v0, v8, v12
+; CHECK-NEXT: vmfle.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16(
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x bfloat> %3,
+ <vscale x 16 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_vf_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vmfle.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.bf16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1bf16_bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.bf16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_vf_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vmfle.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.bf16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2bf16_bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.bf16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_vf_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmfle.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.bf16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4bf16_bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv.v.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.bf16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfle_vf_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmfle.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.bf16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8bf16_bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmfle.vf v11, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.bf16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfle_vf_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmfle.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.bf16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfle_mask_vf_nxv16bf16_bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v13, v0
+; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vmfle.vf v13, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v13
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.bf16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmflt-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt-bf.ll
new file mode 100644
index 0000000..2e1bcc5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmflt-bf.ll
@@ -0,0 +1,496 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_vv_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vmflt.vv v0, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1bf16_nxv1bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmflt.vv v0, v8, v9
+; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %3,
+ <vscale x 1 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_vv_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vmflt.vv v0, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2bf16_nxv2bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmflt.vv v0, v8, v9
+; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16(
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x bfloat> %3,
+ <vscale x 2 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_vv_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmflt.vv v0, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4bf16_nxv4bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmflt.vv v0, v8, v9
+; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t
+; CHECK-NEXT: vmv.v.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16(
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x bfloat> %3,
+ <vscale x 4 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_vv_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmflt.vv v0, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_mask_vv_nxv8bf16_nxv8bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v14, v0
+; CHECK-NEXT: vmflt.vv v0, v8, v10
+; CHECK-NEXT: vmflt.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16(
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x bfloat> %3,
+ <vscale x 8 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmflt_vv_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmflt.vv v0, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmflt_mask_vv_nxv16bf16_nxv16bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v20, v0
+; CHECK-NEXT: vmflt.vv v0, v8, v12
+; CHECK-NEXT: vmflt.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16(
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x bfloat> %3,
+ <vscale x 16 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_vf_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vmflt.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.bf16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1bf16_bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.bf16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_vf_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vmflt.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.bf16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2bf16_bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.bf16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_vf_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmflt.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.bf16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4bf16_bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv.v.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.bf16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_vf_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmflt.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.bf16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8bf16_bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmflt.vf v11, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.bf16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmflt_vf_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmflt.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.bf16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmflt_mask_vf_nxv16bf16_bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v13, v0
+; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vmflt.vf v13, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v13
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.bf16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfne-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne-bf.ll
new file mode 100644
index 0000000..283ffc5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfne-bf.ll
@@ -0,0 +1,496 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_vv_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vmfne.vv v0, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1bf16_nxv1bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmfne.vv v0, v8, v9
+; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %3,
+ <vscale x 1 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_vv_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vmfne.vv v0, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2bf16_nxv2bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmfne.vv v0, v8, v9
+; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16(
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x bfloat> %3,
+ <vscale x 2 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_vv_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmfne.vv v0, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4bf16_nxv4bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmfne.vv v0, v8, v9
+; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t
+; CHECK-NEXT: vmv.v.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16(
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x bfloat> %3,
+ <vscale x 4 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_vv_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmfne.vv v0, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8bf16_nxv8bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v14, v0
+; CHECK-NEXT: vmfne.vv v0, v8, v10
+; CHECK-NEXT: vmfne.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16(
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x bfloat> %3,
+ <vscale x 8 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfne_vv_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmfne.vv v0, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfne_mask_vv_nxv16bf16_nxv16bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v20, v0
+; CHECK-NEXT: vmfne.vv v0, v8, v12
+; CHECK-NEXT: vmfne.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16(
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x bfloat> %3,
+ <vscale x 16 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_vf_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vmfne.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.bf16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1bf16_bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfne.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.bf16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_vf_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vmfne.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.bf16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2bf16_bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfne.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.bf16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_vf_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmfne.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.bf16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4bf16_bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfne.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv.v.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.bf16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_vf_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmfne.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.bf16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8bf16_bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmfne.vf v11, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.bf16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfne_vf_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmfne.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.bf16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfne_mask_vf_nxv16bf16_bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v13, v0
+; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vmfne.vf v13, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v13
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.bf16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_maximal_reconvergence/enable-maximal-reconvergence.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_maximal_reconvergence/enable-maximal-reconvergence.ll
new file mode 100644
index 0000000..105f4a4
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_maximal_reconvergence/enable-maximal-reconvergence.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv1.6-unknown-vulkan1.3-compute --spirv-ext=+SPV_KHR_maximal_reconvergence %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv1.6-unknown-vulkan1.3-compute --spirv-ext=+SPV_KHR_maximal_reconvergence %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpCapability Shader
+; CHECK: OpExtension "SPV_KHR_maximal_reconvergence"
+; CHECK-NOT: OpExecutionMode {{.*}} MaximallyReconvergesKHR
+; CHECK: OpExecutionMode [[main:%[0-9]+]] MaximallyReconvergesKHR
+; CHECK-NOT: OpExecutionMode {{.*}} MaximallyReconvergesKHR
+; CHECK: OpName [[main]] "main"
+define void @main() local_unnamed_addr #0 {
+entry:
+ ret void
+}
+
+define void @negative() local_unnamed_addr #1 {
+entry:
+ ret void
+}
+
+attributes #0 = { "enable-maximal-reconvergence"="true" "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
+attributes #1 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/DynamicIdx/RWBufferDynamicIdx.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/DynamicIdx/RWBufferDynamicIdx.ll
new file mode 100644
index 0000000..cce1eda
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/DynamicIdx/RWBufferDynamicIdx.ll
@@ -0,0 +1,22 @@
+; RUN: llc -O0 -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - | FileCheck %s --match-full-lines
+
+%"__cblayout_$Globals" = type <{ i32 }>
+
+@i = external hidden local_unnamed_addr addrspace(12) global i32, align 4
+@ReadWriteBuf.str = private unnamed_addr constant [13 x i8] c"ReadWriteBuf\00", align 1
+@"$Globals.cb" = local_unnamed_addr global target("spirv.VulkanBuffer", target("spirv.Layout", %"__cblayout_$Globals", 4, 0), 2, 0) poison
+@"$Globals.str" = private unnamed_addr constant [9 x i8] c"$Globals\00", align 1
+
+; CHECK: OpCapability Shader
+; CHECK: OpCapability StorageTexelBufferArrayDynamicIndexingEXT
+
+define void @main() local_unnamed_addr #0 {
+entry:
+ %"$Globals.cb_h.i.i" = tail call target("spirv.VulkanBuffer", target("spirv.Layout", %"__cblayout_$Globals", 4, 0), 2, 0) @"llvm.spv.resource.handlefromimplicitbinding.tspirv.VulkanBuffer_tspirv.Layout_s___cblayout_$Globalss_4_0t_2_0t"(i32 1, i32 0, i32 1, i32 0, ptr nonnull @"$Globals.str")
+ store target("spirv.VulkanBuffer", target("spirv.Layout", %"__cblayout_$Globals", 4, 0), 2, 0) %"$Globals.cb_h.i.i", ptr @"$Globals.cb", align 8
+ %0 = load i32, ptr addrspace(12) @i, align 4
+ %1 = tail call target("spirv.Image", i32, 5, 2, 0, 0, 2, 33) @llvm.spv.resource.handlefromimplicitbinding.tspirv.Image_i32_5_2_0_0_2_33t(i32 0, i32 0, i32 64, i32 %0, ptr nonnull @ReadWriteBuf.str)
+ %2 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.Image_i32_5_2_0_0_2_33t(target("spirv.Image", i32, 5, 2, 0, 0, 2, 33) %1, i32 98)
+ store i32 99, ptr addrspace(11) %2, align 4
+ ret void
+}
\ No newline at end of file
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/DynamicIdx/RWStructuredBufferDynamicIdx.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/DynamicIdx/RWStructuredBufferDynamicIdx.ll
new file mode 100644
index 0000000..da69a2f
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/DynamicIdx/RWStructuredBufferDynamicIdx.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - | FileCheck %s --match-full-lines
+
+%"__cblayout_$Globals" = type <{ i32 }>
+
+@i = external hidden local_unnamed_addr addrspace(12) global i32, align 4
+@ReadWriteStructuredBuf.str = private unnamed_addr constant [23 x i8] c"ReadWriteStructuredBuf\00", align 1
+@"$Globals.cb" = local_unnamed_addr global target("spirv.VulkanBuffer", target("spirv.Layout", %"__cblayout_$Globals", 4, 0), 2, 0) poison
+@"$Globals.str" = private unnamed_addr constant [9 x i8] c"$Globals\00", align 1
+
+; CHECK: OpCapability Shader
+; CHECK: OpCapability StorageBufferArrayDynamicIndexing
+define void @main() local_unnamed_addr #0 {
+entry:
+ %"$Globals.cb_h.i.i" = tail call target("spirv.VulkanBuffer", target("spirv.Layout", %"__cblayout_$Globals", 4, 0), 2, 0) @"llvm.spv.resource.handlefromimplicitbinding.tspirv.VulkanBuffer_tspirv.Layout_s___cblayout_$Globalss_4_0t_2_0t"(i32 2, i32 0, i32 1, i32 0, ptr nonnull @"$Globals.str")
+ store target("spirv.VulkanBuffer", target("spirv.Layout", %"__cblayout_$Globals", 4, 0), 2, 0) %"$Globals.cb_h.i.i", ptr @"$Globals.cb", align 8
+ %0 = load i32, ptr addrspace(12) @i, align 4
+ %1 = tail call target("spirv.VulkanBuffer", [0 x i32], 12, 1) @llvm.spv.resource.handlefromimplicitbinding.tspirv.VulkanBuffer_a0i32_12_1t(i32 0, i32 0, i32 64, i32 %0, ptr nonnull @ReadWriteStructuredBuf.str)
+ %2 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0i32_12_1t(target("spirv.VulkanBuffer", [0 x i32], 12, 1) %1, i32 99)
+ store i32 98, ptr addrspace(11) %2, align 4
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/NonUniformIdx/StructuredBufferNonUniformIdx.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/NonUniformIdx/RWBufferNonUniformIdx.ll
index 92efad9..92efad9 100644
--- a/llvm/test/CodeGen/SPIRV/hlsl-resources/NonUniformIdx/StructuredBufferNonUniformIdx.ll
+++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/NonUniformIdx/RWBufferNonUniformIdx.ll
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/NonUniformIdx/RWStructuredBufferNonUniformIdx.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/NonUniformIdx/RWStructuredBufferNonUniformIdx.ll
index 2a12baf..a820e7a 100644
--- a/llvm/test/CodeGen/SPIRV/hlsl-resources/NonUniformIdx/RWStructuredBufferNonUniformIdx.ll
+++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/NonUniformIdx/RWStructuredBufferNonUniformIdx.ll
@@ -3,6 +3,7 @@
; CHECK-DAG: OpCapability Shader
; CHECK-DAG: OpCapability ShaderNonUniformEXT
+; CHECK-DAG: OpCapability StorageBufferArrayNonUniformIndexingEXT
; CHECK-DAG: OpDecorate {{%[0-9]+}} NonUniformEXT
; CHECK-DAG: OpDecorate {{%[0-9]+}} NonUniformEXT
; CHECK-DAG: OpDecorate {{%[0-9]+}} NonUniformEXT
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/StorageImageDynIdx.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/StorageImageConstIdx.ll
index d002097..e4ec231 100644
--- a/llvm/test/CodeGen/SPIRV/hlsl-resources/StorageImageDynIdx.ll
+++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/StorageImageConstIdx.ll
@@ -4,8 +4,8 @@
@.str.b0 = private unnamed_addr constant [3 x i8] c"B0\00", align 1
; CHECK-DAG: OpCapability Shader
-; CHECK-DAG: OpCapability StorageImageArrayDynamicIndexing
; CHECK-DAG: OpCapability Image1D
+; CHECK-DAG: OpCapability Int8
; CHECK-NOT: OpCapability
; CHECK-DAG: OpDecorate [[Var:%[0-9]+]] DescriptorSet 3
diff --git a/llvm/test/CodeGen/X86/avx-shift.ll b/llvm/test/CodeGen/X86/avx-shift.ll
index c9c09d7..3bce843 100644
--- a/llvm/test/CodeGen/X86/avx-shift.ll
+++ b/llvm/test/CodeGen/X86/avx-shift.ll
@@ -201,7 +201,7 @@ define <8 x i32> @vshift08_add(<8 x i32> %a, <8 x i32> %y) {
define <4 x i32> @vshift13(<4 x i32> %in) {
; CHECK-LABEL: vshift13:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,2,4,16]
; CHECK-NEXT: retq
%T = shl <4 x i32> %in, <i32 0, i32 1, i32 2, i32 4>
ret <4 x i32> %T
diff --git a/llvm/test/CodeGen/X86/avx2-arith.ll b/llvm/test/CodeGen/X86/avx2-arith.ll
index 70b3b99..1133cdfd 100644
--- a/llvm/test/CodeGen/X86/avx2-arith.ll
+++ b/llvm/test/CodeGen/X86/avx2-arith.ll
@@ -199,12 +199,12 @@ define <8 x i32> @mul_const5(<8 x i32> %x) {
define <8 x i32> @mul_const6(<8 x i32> %x) {
; X86-LABEL: mul_const6:
; X86: # %bb.0:
-; X86-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # [0,0,0,2,0,2,0,0]
; X86-NEXT: retl
;
; X64-LABEL: mul_const6:
; X64: # %bb.0:
-; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [0,0,0,2,0,2,0,0]
; X64-NEXT: retq
%y = mul <8 x i32> %x, <i32 0, i32 0, i32 0, i32 2, i32 0, i32 2, i32 0, i32 0>
ret <8 x i32> %y
diff --git a/llvm/test/CodeGen/X86/combine-mul.ll b/llvm/test/CodeGen/X86/combine-mul.ll
index ae4d24f..29c41ca 100644
--- a/llvm/test/CodeGen/X86/combine-mul.ll
+++ b/llvm/test/CodeGen/X86/combine-mul.ll
@@ -66,7 +66,7 @@ define <4 x i32> @combine_vec_mul_pow2a(<4 x i32> %x) {
define <4 x i32> @combine_vec_mul_pow2b(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_pow2b:
; SSE: # %bb.0:
-; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,2,4,16]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_mul_pow2b:
@@ -120,12 +120,12 @@ define <4 x i32> @combine_vec_mul_negpow2a(<4 x i32> %x) {
define <4 x i32> @combine_vec_mul_negpow2b(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_negpow2b:
; SSE: # %bb.0:
-; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4294967295,4294967294,4294967292,4294967280]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_mul_negpow2b:
; AVX: # %bb.0:
-; AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [4294967295,4294967294,4294967292,4294967280]
; AVX-NEXT: retq
%1 = mul <4 x i32> %x, <i32 -1, i32 -2, i32 -4, i32 -16>
ret <4 x i32> %1
@@ -176,12 +176,12 @@ define <4 x i64> @combine_vec_mul_negpow2c(<4 x i64> %x) {
define <4 x i32> @combine_vec_mul_shl_const(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_shl_const:
; SSE: # %bb.0:
-; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2,12,1280,458752]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_mul_shl_const:
; AVX: # %bb.0:
-; AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2,12,1280,458752]
; AVX-NEXT: retq
%1 = shl <4 x i32> %x, <i32 1, i32 2, i32 8, i32 16>
%2 = mul <4 x i32> %1, <i32 1, i32 3, i32 5, i32 7>
@@ -193,7 +193,7 @@ define <4 x i32> @combine_vec_mul_shl_oneuse0(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_mul_shl_oneuse0:
; SSE: # %bb.0:
; SSE-NEXT: pmulld %xmm1, %xmm0
-; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2,4,256,65536]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_mul_shl_oneuse0:
@@ -210,7 +210,7 @@ define <4 x i32> @combine_vec_mul_shl_oneuse1(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_mul_shl_oneuse1:
; SSE: # %bb.0:
; SSE-NEXT: pmulld %xmm1, %xmm0
-; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2,4,256,65536]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_mul_shl_oneuse1:
@@ -226,7 +226,7 @@ define <4 x i32> @combine_vec_mul_shl_oneuse1(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @combine_vec_mul_shl_multiuse0(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_mul_shl_multiuse0:
; SSE: # %bb.0:
-; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2,4,256,65536]
; SSE-NEXT: pmulld %xmm0, %xmm1
; SSE-NEXT: paddd %xmm1, %xmm0
; SSE-NEXT: retq
@@ -246,7 +246,7 @@ define <4 x i32> @combine_vec_mul_shl_multiuse0(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @combine_vec_mul_shl_multiuse1(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_mul_shl_multiuse1:
; SSE: # %bb.0:
-; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2,4,256,65536]
; SSE-NEXT: pmulld %xmm0, %xmm1
; SSE-NEXT: paddd %xmm1, %xmm0
; SSE-NEXT: retq
@@ -268,13 +268,13 @@ define <4 x i32> @combine_vec_mul_shl_multiuse1(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @combine_vec_mul_add(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_add:
; SSE: # %bb.0:
-; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4,6,2,0]
; SSE-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_mul_add:
; AVX: # %bb.0:
-; AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [4,6,2,0]
; AVX-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%1 = add <4 x i32> %x, <i32 1, i32 2, i32 8, i32 16>
diff --git a/llvm/test/CodeGen/X86/combine-multiplies.ll b/llvm/test/CodeGen/X86/combine-multiplies.ll
index a5d9846..4bdf20d 100644
--- a/llvm/test/CodeGen/X86/combine-multiplies.ll
+++ b/llvm/test/CodeGen/X86/combine-multiplies.ll
@@ -142,9 +142,9 @@ define void @testCombineMultiplies_non_splat(<4 x i32> %v1) nounwind {
; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [11,22,33,44]
; CHECK-NEXT: paddd %xmm0, %xmm1
; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; CHECK-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; CHECK-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [22,33,44,55]
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
+; CHECK-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2 # [33,u,55,u]
; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-NEXT: movdqa {{.*#+}} xmm2 = [242,726,1452,2420]
diff --git a/llvm/test/CodeGen/X86/combine-pmuldq.ll b/llvm/test/CodeGen/X86/combine-pmuldq.ll
index 70335f8..ff5329c 100644
--- a/llvm/test/CodeGen/X86/combine-pmuldq.ll
+++ b/llvm/test/CodeGen/X86/combine-pmuldq.ll
@@ -204,16 +204,16 @@ define i32 @PR43159(ptr %a0) {
; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa (%rdi), %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [344322273,344322273,1916962805,1916962805]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrld $1, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5],xmm0[6,7]
; SSE-NEXT: psubd %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,2147483648,2147483648]
; SSE-NEXT: paddd %xmm1, %xmm0
; SSE-NEXT: psrld $7, %xmm0
-; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [1645975491,344322273,2164392969,1916962805]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE-NEXT: psrld $6, %xmm1
; SSE-NEXT: movd %xmm1, %edi
@@ -226,15 +226,15 @@ define i32 @PR43159(ptr %a0) {
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [344322273,344322273,1916962805,1916962805]
; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [2147483648,2147483648,2147483648,2147483648]
; AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpsrld $7, %xmm1, %xmm1
; AVX1-NEXT: vpsrld $1, %xmm0, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5],xmm0[6,7]
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1645975491,344322273,2164392969,1916962805]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX1-NEXT: vpsrld $6, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %edi
@@ -247,9 +247,9 @@ define i32 @PR43159(ptr %a0) {
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [344322273,u,1916962805,u]
; AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
-; AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [1645975491,344322273,2164392969,1916962805]
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
; AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0
@@ -270,9 +270,9 @@ define i32 @PR43159(ptr %a0) {
; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX512VL-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX512VL-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [344322273,u,1916962805,u]
; AVX512VL-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
-; AVX512VL-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX512VL-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [1645975491,344322273,2164392969,1916962805]
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
; AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
@@ -293,9 +293,9 @@ define i32 @PR43159(ptr %a0) {
; AVX512DQVL: # %bb.0: # %entry
; AVX512DQVL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX512DQVL-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX512DQVL-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [344322273,u,1916962805,u]
; AVX512DQVL-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
-; AVX512DQVL-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX512DQVL-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [1645975491,344322273,2164392969,1916962805]
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; AVX512DQVL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
; AVX512DQVL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/combine-rotates.ll b/llvm/test/CodeGen/X86/combine-rotates.ll
index 65d74c8..e7152ec 100644
--- a/llvm/test/CodeGen/X86/combine-rotates.ll
+++ b/llvm/test/CodeGen/X86/combine-rotates.ll
@@ -10,9 +10,9 @@ define <4 x i32> @combine_vec_rot_rot(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_rot_rot:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [524288,131072,32768,8192]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [131072,u,8192,u]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
diff --git a/llvm/test/CodeGen/X86/combine-sdiv.ll b/llvm/test/CodeGen/X86/combine-sdiv.ll
index 6bcbfe1..f7baee9 100644
--- a/llvm/test/CodeGen/X86/combine-sdiv.ll
+++ b/llvm/test/CodeGen/X86/combine-sdiv.ll
@@ -2927,7 +2927,7 @@ define <16 x i8> @pr38658(<16 x i8> %x) {
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
-; SSE2-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [0,0,0,0,0,0,0,37632]
+; SSE2-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,147]
; SSE2-NEXT: psrlw $8, %xmm3
; SSE2-NEXT: packuswb %xmm3, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm0
@@ -2947,7 +2947,7 @@ define <16 x i8> @pr38658(<16 x i8> %x) {
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; SSE41-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [0,0,0,0,0,0,0,37632]
+; SSE41-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,147]
; SSE41-NEXT: psrlw $8, %xmm2
; SSE41-NEXT: packuswb %xmm2, %xmm1
; SSE41-NEXT: paddb %xmm0, %xmm1
@@ -2971,7 +2971,7 @@ define <16 x i8> @pr38658(<16 x i8> %x) {
; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [0,0,0,0,0,0,0,37632]
+; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,147]
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm1
@@ -3044,7 +3044,7 @@ define <16 x i8> @pr38658(<16 x i8> %x) {
; XOP: # %bb.0:
; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOP-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; XOP-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [0,0,0,0,0,0,0,37632]
+; XOP-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,147]
; XOP-NEXT: vpperm {{.*#+}} xmm1 = xmm1[1,3,5,7,9,11,13,15],xmm2[1,3,5,7,9,11,13,15]
; XOP-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; XOP-NEXT: vpshab {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
diff --git a/llvm/test/CodeGen/X86/combine-shl.ll b/llvm/test/CodeGen/X86/combine-shl.ll
index 1ce10c37..9548967 100644
--- a/llvm/test/CodeGen/X86/combine-shl.ll
+++ b/llvm/test/CodeGen/X86/combine-shl.ll
@@ -88,7 +88,7 @@ define <4 x i32> @combine_vec_shl_known_zero1(<4 x i32> %x) {
; SSE2-NEXT: pmuludq %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [32768,u,8192,u]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: movdqa %xmm1, %xmm0
@@ -97,7 +97,7 @@ define <4 x i32> @combine_vec_shl_known_zero1(<4 x i32> %x) {
; SSE41-LABEL: combine_vec_shl_known_zero1:
; SSE41: # %bb.0:
; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [65536,32768,16384,8192]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_known_zero1:
@@ -198,16 +198,16 @@ define <4 x i32> @combine_vec_shl_shl1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_shl1:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,64,256,1024]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [64,u,1024,u]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_shl_shl1:
; SSE41: # %bb.0:
-; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,64,256,1024]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_shl1:
@@ -304,17 +304,17 @@ define <8 x i32> @combine_vec_shl_ext_shl2(<8 x i16> %x) {
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: psrad $16, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [131072,524288,2097152,8388608]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,2,2,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [524288,u,8388608,u]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; SSE2-NEXT: psrad $16, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [33554432,134217728,536870912,2147483648]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [134217728,u,2147483648,u]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: movdqa %xmm2, %xmm0
@@ -323,10 +323,10 @@ define <8 x i32> @combine_vec_shl_ext_shl2(<8 x i16> %x) {
; SSE41-LABEL: combine_vec_shl_ext_shl2:
; SSE41: # %bb.0:
; SSE41-NEXT: pmovsxwd %xmm0, %xmm2
-; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [131072,524288,2097152,8388608]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE41-NEXT: pmovsxwd %xmm0, %xmm1
-; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [33554432,134217728,536870912,2147483648]
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
@@ -673,9 +673,9 @@ define <4 x i32> @combine_vec_shl_add1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_add1:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2,4,8,16]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [4,u,16,u]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -683,7 +683,7 @@ define <4 x i32> @combine_vec_shl_add1(<4 x i32> %x) {
;
; SSE41-LABEL: combine_vec_shl_add1:
; SSE41: # %bb.0:
-; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2,4,8,16]
; SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -726,9 +726,9 @@ define <4 x i32> @combine_vec_shl_or1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_or1:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2,4,8,16]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [4,u,16,u]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -736,7 +736,7 @@ define <4 x i32> @combine_vec_shl_or1(<4 x i32> %x) {
;
; SSE41-LABEL: combine_vec_shl_or1:
; SSE41: # %bb.0:
-; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2,4,8,16]
; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -765,7 +765,7 @@ define <4 x i32> @combine_vec_shl_mul0(<4 x i32> %x) {
;
; SSE41-LABEL: combine_vec_shl_mul0:
; SSE41: # %bb.0:
-; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [20,20,20,20]
; SSE41-NEXT: retq
;
; AVX2-LABEL: combine_vec_shl_mul0:
@@ -787,21 +787,21 @@ define <4 x i32> @combine_vec_shl_mul1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_mul1:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [10,24,56,128]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [24,u,128,u]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_shl_mul1:
; SSE41: # %bb.0:
-; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [10,24,56,128]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_mul1:
; AVX: # %bb.0:
-; AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [10,24,56,128]
; AVX-NEXT: retq
%1 = mul <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
%2 = shl <4 x i32> %1, <i32 1, i32 2, i32 3, i32 4>
@@ -813,9 +813,9 @@ define <4 x i32> @combine_vec_add_shl_nonsplat(<4 x i32> %a0) {
; SSE2-LABEL: combine_vec_add_shl_nonsplat:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4,8,16,32]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [8,u,32,u]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -823,7 +823,7 @@ define <4 x i32> @combine_vec_add_shl_nonsplat(<4 x i32> %a0) {
;
; SSE41-LABEL: combine_vec_add_shl_nonsplat:
; SSE41: # %bb.0:
-; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4,8,16,32]
; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -852,7 +852,7 @@ define <4 x i32> @combine_vec_add_shl_and_nonsplat(<4 x i32> %a0) {
; SSE2-NEXT: pmuludq %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [8,u,32,u]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -863,7 +863,7 @@ define <4 x i32> @combine_vec_add_shl_and_nonsplat(<4 x i32> %a0) {
; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
-; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4,8,16,32]
; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/combine-srem.ll b/llvm/test/CodeGen/X86/combine-srem.ll
index 4b01c16..0ca79ad 100644
--- a/llvm/test/CodeGen/X86/combine-srem.ll
+++ b/llvm/test/CodeGen/X86/combine-srem.ll
@@ -272,7 +272,7 @@ define <4 x i32> @combine_vec_srem_by_pow2b(<4 x i32> %x) {
; SSE-NEXT: psrad $2, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3,4,5,6,7]
-; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [1,2,4,8]
; SSE-NEXT: psubd %xmm2, %xmm0
; SSE-NEXT: retq
;
@@ -291,7 +291,7 @@ define <4 x i32> @combine_vec_srem_by_pow2b(<4 x i32> %x) {
; AVX1-NEXT: vpsrad $2, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
-; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [1,2,4,8]
; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
@@ -336,7 +336,7 @@ define <4 x i32> @combine_vec_srem_by_pow2b_neg(<4 x i32> %x) {
; SSE-NEXT: psrld $1, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
-; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [4294967294,4294967292,4294967288,4294967280]
; SSE-NEXT: paddd %xmm1, %xmm0
; SSE-NEXT: retq
;
@@ -358,7 +358,7 @@ define <4 x i32> @combine_vec_srem_by_pow2b_neg(<4 x i32> %x) {
; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [4294967294,4294967292,4294967288,4294967280]
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
@@ -368,7 +368,7 @@ define <4 x i32> @combine_vec_srem_by_pow2b_neg(<4 x i32> %x) {
; AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vpsravd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [4294967294,4294967292,4294967288,4294967280]
; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
%1 = srem <4 x i32> %x, <i32 -2, i32 -4, i32 -8, i32 -16>
diff --git a/llvm/test/CodeGen/X86/combine-udiv.ll b/llvm/test/CodeGen/X86/combine-udiv.ll
index c90344b8..233735d 100644
--- a/llvm/test/CodeGen/X86/combine-udiv.ll
+++ b/llvm/test/CodeGen/X86/combine-udiv.ll
@@ -665,14 +665,12 @@ define <16 x i8> @combine_vec_udiv_nonuniform4(<16 x i8> %x) {
;
; XOP-LABEL: combine_vec_udiv_nonuniform4:
; XOP: # %bb.0:
-; XOP-NEXT: movl $171, %eax
+; XOP-NEXT: movl $249, %eax
; XOP-NEXT: vmovd %eax, %xmm1
; XOP-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; XOP-NEXT: vpmullw %xmm1, %xmm2, %xmm1
-; XOP-NEXT: vpsrlw $8, %xmm1, %xmm1
-; XOP-NEXT: movl $249, %eax
-; XOP-NEXT: vmovd %eax, %xmm2
-; XOP-NEXT: vpshlb %xmm2, %xmm1, %xmm1
+; XOP-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [171,0,0,0]
+; XOP-NEXT: vpsrlw $8, %xmm2, %xmm2
+; XOP-NEXT: vpshlb %xmm1, %xmm2, %xmm1
; XOP-NEXT: vpmovsxwq {{.*#+}} xmm2 = [18446744073709551360,18446744073709551615]
; XOP-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
; XOP-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/combine-umax.ll b/llvm/test/CodeGen/X86/combine-umax.ll
index 25f8ec8..482b4fc 100644
--- a/llvm/test/CodeGen/X86/combine-umax.ll
+++ b/llvm/test/CodeGen/X86/combine-umax.ll
@@ -60,7 +60,7 @@ define <16 x i8> @test_v16i8_reassociation(<16 x i8> %a) {
define <16 x i8> @test_v16i8_demandedbits(<16 x i8> %x, <16 x i8> %y, <16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: test_v16i8_demandedbits:
; SSE2: # %bb.0:
-; SSE2-NEXT: pmaxub %xmm1, %xmm0
+; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm3
diff --git a/llvm/test/CodeGen/X86/combine-umin.ll b/llvm/test/CodeGen/X86/combine-umin.ll
index 76dbcb5..e2757d0 100644
--- a/llvm/test/CodeGen/X86/combine-umin.ll
+++ b/llvm/test/CodeGen/X86/combine-umin.ll
@@ -77,7 +77,7 @@ define <16 x i8> @test_v16i8_reassociation(<16 x i8> %a) {
define <16 x i8> @test_v16i8_demandedbits(<16 x i8> %x, <16 x i8> %y, <16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: test_v16i8_demandedbits:
; SSE2: # %bb.0:
-; SSE2-NEXT: pminub %xmm1, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm3
diff --git a/llvm/test/CodeGen/X86/combine-urem.ll b/llvm/test/CodeGen/X86/combine-urem.ll
index 715d5c7..34c7d3d 100644
--- a/llvm/test/CodeGen/X86/combine-urem.ll
+++ b/llvm/test/CodeGen/X86/combine-urem.ll
@@ -327,7 +327,7 @@ define <4 x i32> @combine_vec_urem_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) {
; SSE-NEXT: pslld $23, %xmm1
; SSE-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT: cvttps2dq %xmm1, %xmm1
-; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,4,8,16]
; SSE-NEXT: pcmpeqd %xmm2, %xmm2
; SSE-NEXT: paddd %xmm1, %xmm2
; SSE-NEXT: pand %xmm2, %xmm0
@@ -338,7 +338,7 @@ define <4 x i32> @combine_vec_urem_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) {
; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
; AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
-; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [1,4,8,16]
; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/dagcombine-shifts.ll b/llvm/test/CodeGen/X86/dagcombine-shifts.ll
index 345b2b9..19b9452 100644
--- a/llvm/test/CodeGen/X86/dagcombine-shifts.ll
+++ b/llvm/test/CodeGen/X86/dagcombine-shifts.ll
@@ -437,9 +437,9 @@ define <4 x i32> @shift_zext_shl2_vec(<4 x i8> %x) nounwind {
; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X64-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [512,256,128,64]
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X64-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; X64-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [256,u,64,u]
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X64-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/funnel-shift.ll b/llvm/test/CodeGen/X86/funnel-shift.ll
index df97f49..252cb33 100644
--- a/llvm/test/CodeGen/X86/funnel-shift.ll
+++ b/llvm/test/CodeGen/X86/funnel-shift.ll
@@ -574,9 +574,9 @@ define <4 x i32> @fshl_v4i32_undef1_cst(<4 x i32> %a0) nounwind {
; X86-SSE2-LABEL: fshl_v4i32_undef1_cst:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [512,1024,2048,4096]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [1024,u,4096,u]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-SSE2-NEXT: retl
@@ -746,9 +746,9 @@ define <4 x i32> @fshr_v4i32_undef1_cst(<4 x i32> %a0) nounwind {
; X86-SSE2-LABEL: fshr_v4i32_undef1_cst:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [8388608,4194304,2097152,1048576]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [4194304,u,1048576,u]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-SSE2-NEXT: retl
diff --git a/llvm/test/CodeGen/X86/hoist-and-by-const-from-shl-in-eqcmp-zero.ll b/llvm/test/CodeGen/X86/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
index 1a2aac6..b45d01e 100644
--- a/llvm/test/CodeGen/X86/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
+++ b/llvm/test/CodeGen/X86/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
@@ -499,9 +499,9 @@ define <4 x i1> @vec_4xi32_nonsplat_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [0,1,16776960,2147483648]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2 # [1,u,2147483648,u]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X86-SSE2-NEXT: pand %xmm1, %xmm0
@@ -524,9 +524,9 @@ define <4 x i1> @vec_4xi32_nonsplat_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
; X64-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [0,1,16776960,2147483648]
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [1,u,2147483648,u]
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; X64-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X64-SSE2-NEXT: pand %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/known-pow2.ll b/llvm/test/CodeGen/X86/known-pow2.ll
index e183bbc..019bca7 100644
--- a/llvm/test/CodeGen/X86/known-pow2.ll
+++ b/llvm/test/CodeGen/X86/known-pow2.ll
@@ -28,16 +28,16 @@ define <4 x i32> @pow2_non_splat_vec_fail0(<4 x i32> %x) {
; CHECK-NEXT: pmuludq %xmm0, %xmm1
; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; CHECK-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; CHECK-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [1073741824,u,67108864,u]
; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,3,2,3]
; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; CHECK-NEXT: movdqa %xmm1, %xmm3
; CHECK-NEXT: psrld $1, %xmm3
; CHECK-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm1[2,3]
-; CHECK-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
+; CHECK-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [9,4,16,64]
; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; CHECK-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [4,u,64,u]
; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; CHECK-NEXT: psubd %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/madd.ll b/llvm/test/CodeGen/X86/madd.ll
index 4ec54d8..2a2a4a5 100644
--- a/llvm/test/CodeGen/X86/madd.ll
+++ b/llvm/test/CodeGen/X86/madd.ll
@@ -2057,10 +2057,10 @@ define <4 x i32> @pmaddwd_negative2(<8 x i16> %A) {
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSE2-NEXT: psrad $16, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [4294934528,0,0,0]
; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,0,7,0,42,0,32,0]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [32768,4294934528,0,0]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[0,2]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,3]
; SSE2-NEXT: paddd %xmm2, %xmm1
@@ -2072,14 +2072,14 @@ define <4 x i32> @pmaddwd_negative2(<8 x i16> %A) {
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,7,42,32]
-; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [32768,4294934528,0,0]
; AVX1-NEXT: vphaddd %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
; AVX256-LABEL: pmaddwd_negative2:
; AVX256: # %bb.0:
; AVX256-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVX256-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX256-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [32768,4294934528,0,0,1,7,42,32]
; AVX256-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX256-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX256-NEXT: vzeroupper
diff --git a/llvm/test/CodeGen/X86/omit-urem-of-power-of-two-or-zero-when-comparing-with-zero.ll b/llvm/test/CodeGen/X86/omit-urem-of-power-of-two-or-zero-when-comparing-with-zero.ll
index 693d199..9729fd7 100644
--- a/llvm/test/CodeGen/X86/omit-urem-of-power-of-two-or-zero-when-comparing-with-zero.ll
+++ b/llvm/test/CodeGen/X86/omit-urem-of-power-of-two-or-zero-when-comparing-with-zero.ll
@@ -100,7 +100,7 @@ define <4 x i1> @p4_vector_urem_by_const__splat(<4 x i32> %x, <4 x i32> %y) {
; SSE4-LABEL: p4_vector_urem_by_const__splat:
; SSE4: # %bb.0:
; SSE4-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311531,2863311531,2863311531,2863311531]
; SSE4-NEXT: psrld $1, %xmm0
; SSE4-NEXT: movdqa {{.*#+}} xmm1 = [715827883,715827883,715827883,715827883]
; SSE4-NEXT: pcmpgtd %xmm0, %xmm1
@@ -128,10 +128,10 @@ define <4 x i1> @p5_vector_urem_by_const__nonsplat(<4 x i32> %x, <4 x i32> %y) {
; SSE2: # %bb.0:
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3435973837,u,954437177,u]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311531,3435973837,2863311531,954437177]
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,u,2147483648,u]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: psrlq $32, %xmm0
@@ -145,7 +145,7 @@ define <4 x i1> @p5_vector_urem_by_const__nonsplat(<4 x i32> %x, <4 x i32> %y) {
; SSE4-LABEL: p5_vector_urem_by_const__nonsplat:
; SSE4: # %bb.0:
; SSE4-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311531,3435973837,2863311531,954437177]
; SSE4-NEXT: pmovzxdq {{.*#+}} xmm1 = [1,2147483648]
; SSE4-NEXT: pmuludq %xmm0, %xmm1
; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
@@ -159,7 +159,7 @@ define <4 x i1> @p5_vector_urem_by_const__nonsplat(<4 x i32> %x, <4 x i32> %y) {
; AVX2-LABEL: p5_vector_urem_by_const__nonsplat:
; AVX2: # %bb.0:
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2863311531,3435973837,2863311531,954437177]
; AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -196,7 +196,7 @@ define <4 x i1> @p6_vector_urem_by_const__nonsplat_undef0(<4 x i32> %x, <4 x i32
; SSE4-LABEL: p6_vector_urem_by_const__nonsplat_undef0:
; SSE4: # %bb.0:
; SSE4-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311531,2863311531,2863311531,2863311531]
; SSE4-NEXT: movdqa %xmm0, %xmm1
; SSE4-NEXT: psrld $1, %xmm1
; SSE4-NEXT: pslld $31, %xmm0
@@ -312,7 +312,7 @@ define <4 x i1> @p8_vector_urem_by_const__nonsplat_undef3(<4 x i32> %x, <4 x i32
; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE4-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; SSE4-NEXT: psrld $2, %xmm2
-; SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [6,6,6,6]
; SSE4-NEXT: psubd %xmm2, %xmm0
; SSE4-NEXT: pxor %xmm1, %xmm1
; SSE4-NEXT: pcmpeqd %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/pmul.ll b/llvm/test/CodeGen/X86/pmul.ll
index 9aee2f1..00731fe 100644
--- a/llvm/test/CodeGen/X86/pmul.ll
+++ b/llvm/test/CodeGen/X86/pmul.ll
@@ -91,7 +91,7 @@ define <4 x i32> @mul_v4i32c(<4 x i32> %i) nounwind {
;
; SSE41-LABEL: mul_v4i32c:
; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [117,117,117,117]
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v4i32c:
diff --git a/llvm/test/CodeGen/X86/pr162812.ll b/llvm/test/CodeGen/X86/pr162812.ll
index 4ea3101..cec093c 100644
--- a/llvm/test/CodeGen/X86/pr162812.ll
+++ b/llvm/test/CodeGen/X86/pr162812.ll
@@ -34,61 +34,43 @@ define <32 x i8> @PR162812(<32 x i8> %a, <32 x i8> %mask) {
;
; SSE42-LABEL: PR162812:
; SSE42: # %bb.0:
-; SSE42-NEXT: movdqa %xmm2, %xmm5
-; SSE42-NEXT: movdqa %xmm0, %xmm2
+; SSE42-NEXT: movdqa %xmm0, %xmm4
+; SSE42-NEXT: psrlw $2, %xmm2
+; SSE42-NEXT: movdqa {{.*#+}} xmm5 = [8224,8224,8224,8224,8224,8224,8224,8224]
+; SSE42-NEXT: pand %xmm5, %xmm2
+; SSE42-NEXT: paddb %xmm2, %xmm2
+; SSE42-NEXT: paddb %xmm2, %xmm2
; SSE42-NEXT: movdqa %xmm0, %xmm6
-; SSE42-NEXT: psllw $2, %xmm6
-; SSE42-NEXT: movdqa {{.*#+}} xmm7 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; SSE42-NEXT: pand %xmm7, %xmm6
-; SSE42-NEXT: psrlw $2, %xmm5
-; SSE42-NEXT: movdqa {{.*#+}} xmm4 = [8224,8224,8224,8224,8224,8224,8224,8224]
-; SSE42-NEXT: pand %xmm4, %xmm5
+; SSE42-NEXT: paddb %xmm0, %xmm6
+; SSE42-NEXT: movdqa %xmm2, %xmm0
+; SSE42-NEXT: pblendvb %xmm0, %xmm6, %xmm4
+; SSE42-NEXT: psrlw $2, %xmm3
+; SSE42-NEXT: pand %xmm3, %xmm5
; SSE42-NEXT: paddb %xmm5, %xmm5
-; SSE42-NEXT: movdqa %xmm5, %xmm0
-; SSE42-NEXT: pblendvb %xmm0, %xmm6, %xmm2
-; SSE42-NEXT: movdqa %xmm2, %xmm6
-; SSE42-NEXT: paddb %xmm2, %xmm6
; SSE42-NEXT: paddb %xmm5, %xmm5
+; SSE42-NEXT: movdqa %xmm1, %xmm2
+; SSE42-NEXT: paddb %xmm1, %xmm2
; SSE42-NEXT: movdqa %xmm5, %xmm0
-; SSE42-NEXT: pblendvb %xmm0, %xmm6, %xmm2
-; SSE42-NEXT: movdqa %xmm1, %xmm5
-; SSE42-NEXT: psllw $2, %xmm5
-; SSE42-NEXT: pand %xmm7, %xmm5
-; SSE42-NEXT: psrlw $2, %xmm3
-; SSE42-NEXT: pand %xmm3, %xmm4
-; SSE42-NEXT: paddb %xmm4, %xmm4
-; SSE42-NEXT: movdqa %xmm4, %xmm0
-; SSE42-NEXT: pblendvb %xmm0, %xmm5, %xmm1
-; SSE42-NEXT: movdqa %xmm1, %xmm3
-; SSE42-NEXT: paddb %xmm1, %xmm3
-; SSE42-NEXT: paddb %xmm4, %xmm4
+; SSE42-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE42-NEXT: movdqa %xmm4, %xmm0
-; SSE42-NEXT: pblendvb %xmm0, %xmm3, %xmm1
-; SSE42-NEXT: movdqa %xmm2, %xmm0
; SSE42-NEXT: retq
;
; AVX2-LABEL: PR162812:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpsllw $2, %ymm0, %ymm2
-; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm2
; AVX2-NEXT: vpsrlw $2, %ymm1, %ymm1
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm2
; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: PR162812:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpsllw $2, %ymm0, %ymm2
-; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm2, %ymm2
+; AVX512-NEXT: vpaddb %ymm0, %ymm0, %ymm2
; AVX512-NEXT: vpsrlw $2, %ymm1, %ymm1
; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm1, %ymm1
; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpaddb %ymm0, %ymm0, %ymm2
; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX512-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/prefer-avx256-wide-mul.ll b/llvm/test/CodeGen/X86/prefer-avx256-wide-mul.ll
index 885b075..59b03f8 100644
--- a/llvm/test/CodeGen/X86/prefer-avx256-wide-mul.ll
+++ b/llvm/test/CodeGen/X86/prefer-avx256-wide-mul.ll
@@ -9,7 +9,7 @@ define <32 x i8> @test_div7_32i8(<32 x i8> %a) {
; AVX256BW: # %bb.0:
; AVX256BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX256BW-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX256BW-NEXT: vpbroadcastw {{.*#+}} ymm3 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
+; AVX256BW-NEXT: vpbroadcastw {{.*#+}} ymm3 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
; AVX256BW-NEXT: vpmullw %ymm3, %ymm2, %ymm2
; AVX256BW-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX256BW-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
diff --git a/llvm/test/CodeGen/X86/rotate-extract-vector.ll b/llvm/test/CodeGen/X86/rotate-extract-vector.ll
index 1ead3f9..7d0ec64 100644
--- a/llvm/test/CodeGen/X86/rotate-extract-vector.ll
+++ b/llvm/test/CodeGen/X86/rotate-extract-vector.ll
@@ -149,19 +149,12 @@ define <32 x i16> @illegal_no_extract_mul(<32 x i16> %i) nounwind {
; Result would undershift
define <4 x i64> @no_extract_shl(<4 x i64> %i) nounwind {
-; X86-LABEL: no_extract_shl:
-; X86: # %bb.0:
-; X86-NEXT: vpsllq $24, %ymm0, %ymm1
-; X86-NEXT: vpsrlq $39, %ymm0, %ymm0
-; X86-NEXT: vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}{1to4}, %ymm1, %ymm0
-; X86-NEXT: retl
-;
-; X64-LABEL: no_extract_shl:
-; X64: # %bb.0:
-; X64-NEXT: vpsllq $24, %ymm0, %ymm1
-; X64-NEXT: vpsrlq $39, %ymm0, %ymm0
-; X64-NEXT: vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm1, %ymm0
-; X64-NEXT: retq
+; CHECK-LABEL: no_extract_shl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpsllq $24, %ymm0, %ymm1
+; CHECK-NEXT: vpsrlq $39, %ymm0, %ymm0
+; CHECK-NEXT: vpternlogq {{.*#+}} ymm0 = (ymm0 & m64bcst) | ymm1
+; CHECK-NEXT: ret{{[l|q]}}
%lhs_mul = shl <4 x i64> %i, <i64 11, i64 11, i64 11, i64 11>
%rhs_mul = shl <4 x i64> %i, <i64 24, i64 24, i64 24, i64 24>
%lhs_shift = lshr <4 x i64> %lhs_mul, <i64 50, i64 50, i64 50, i64 50>
@@ -171,19 +164,12 @@ define <4 x i64> @no_extract_shl(<4 x i64> %i) nounwind {
; Result would overshift
define <4 x i32> @no_extract_shrl(<4 x i32> %i) nounwind {
-; X86-LABEL: no_extract_shrl:
-; X86: # %bb.0:
-; X86-NEXT: vpsrld $9, %xmm0, %xmm1
-; X86-NEXT: vpslld $25, %xmm0, %xmm0
-; X86-NEXT: vpternlogd $236, {{\.?LCPI[0-9]+_[0-9]+}}{1to4}, %xmm1, %xmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: no_extract_shrl:
-; X64: # %bb.0:
-; X64-NEXT: vpsrld $9, %xmm0, %xmm1
-; X64-NEXT: vpslld $25, %xmm0, %xmm0
-; X64-NEXT: vpternlogd $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm0
-; X64-NEXT: retq
+; CHECK-LABEL: no_extract_shrl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpsrld $9, %xmm0, %xmm1
+; CHECK-NEXT: vpslld $25, %xmm0, %xmm0
+; CHECK-NEXT: vpternlogd {{.*#+}} xmm0 = (xmm0 & m32bcst) | xmm1
+; CHECK-NEXT: ret{{[l|q]}}
%lhs_div = lshr <4 x i32> %i, <i32 3, i32 3, i32 3, i32 3>
%rhs_div = lshr <4 x i32> %i, <i32 9, i32 9, i32 9, i32 9>
%lhs_shift = shl <4 x i32> %lhs_div, <i32 28, i32 28, i32 28, i32 28>
diff --git a/llvm/test/CodeGen/X86/sdiv-exact.ll b/llvm/test/CodeGen/X86/sdiv-exact.ll
index 4568191..7873ffa 100644
--- a/llvm/test/CodeGen/X86/sdiv-exact.ll
+++ b/llvm/test/CodeGen/X86/sdiv-exact.ll
@@ -87,7 +87,7 @@ define <4 x i32> @test5(<4 x i32> %x) {
; X86-NEXT: pmuludq %xmm1, %xmm0
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [2863311531,u,3264175145,u]
; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-NEXT: retl
@@ -95,7 +95,7 @@ define <4 x i32> @test5(<4 x i32> %x) {
; X64-LABEL: test5:
; X64: # %bb.0:
; X64-NEXT: vpsravd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2863311531,2863311531,3264175145,3264175145]
; X64-NEXT: retq
%div = sdiv exact <4 x i32> %x, <i32 24, i32 24, i32 25, i32 25>
ret <4 x i32> %div
@@ -112,7 +112,7 @@ define <4 x i32> @test6(<4 x i32> %x) {
; X86-NEXT: pmuludq %xmm0, %xmm1
; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [2863311531,u,3303820997,u]
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X86-NEXT: movdqa %xmm1, %xmm0
@@ -121,7 +121,7 @@ define <4 x i32> @test6(<4 x i32> %x) {
; X64-LABEL: test6:
; X64: # %bb.0:
; X64-NEXT: vpsravd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2863311531,2863311531,3303820997,3303820997]
; X64-NEXT: retq
%div = sdiv exact <4 x i32> %x, <i32 24, i32 24, i32 26, i32 26>
ret <4 x i32> %div
@@ -131,16 +131,16 @@ define <4 x i32> @test7(<4 x i32> %x) {
; X86-LABEL: test7:
; X86: # %bb.0:
; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [3264175145,3264175145,1749801491,1749801491]
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [3264175145,u,1749801491,u]
; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-NEXT: retl
;
; X64-LABEL: test7:
; X64: # %bb.0:
-; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3264175145,3264175145,1749801491,1749801491]
; X64-NEXT: retq
%div = sdiv exact <4 x i32> %x, <i32 25, i32 25, i32 27, i32 27>
ret <4 x i32> %div
@@ -156,7 +156,7 @@ define <4 x i32> @test8(<4 x i32> %x) {
; X86-NEXT: pmuludq %xmm1, %xmm0
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [1,u,2863311531,u]
; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-NEXT: retl
@@ -164,7 +164,7 @@ define <4 x i32> @test8(<4 x i32> %x) {
; X64-LABEL: test8:
; X64: # %bb.0:
; X64-NEXT: vpsravd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,1,2863311531,2863311531]
; X64-NEXT: retq
%div = sdiv exact <4 x i32> %x, <i32 1, i32 1, i32 24, i32 24>
ret <4 x i32> %div
diff --git a/llvm/test/CodeGen/X86/shrink_vmul.ll b/llvm/test/CodeGen/X86/shrink_vmul.ll
index e53eed4..504a392 100644
--- a/llvm/test/CodeGen/X86/shrink_vmul.ll
+++ b/llvm/test/CodeGen/X86/shrink_vmul.ll
@@ -1760,7 +1760,7 @@ define void @mul_2xi16_varconst1(ptr nocapture readonly %a, i64 %index) {
; X86-AVX-NEXT: movl c, %edx
; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; X86-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # [0,65535,u,u]
; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
; X86-AVX-NEXT: retl
;
@@ -1781,7 +1781,7 @@ define void @mul_2xi16_varconst1(ptr nocapture readonly %a, i64 %index) {
; X64-AVX-NEXT: movq c(%rip), %rax
; X64-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; X64-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,65535,u,u]
; X64-AVX-NEXT: vmovq %xmm0, (%rax,%rsi,4)
; X64-AVX-NEXT: retq
entry:
@@ -1864,7 +1864,7 @@ define void @mul_2xi16_varconst3(ptr nocapture readonly %a, i64 %index) {
; X86-SSE-NEXT: movl c, %edx
; X86-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
-; X86-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [65536,65536,65536,65536]
; X86-SSE-NEXT: psllq $32, %xmm0
; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4)
; X86-SSE-NEXT: retl
@@ -1876,7 +1876,7 @@ define void @mul_2xi16_varconst3(ptr nocapture readonly %a, i64 %index) {
; X86-AVX-NEXT: movl c, %edx
; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; X86-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # [0,65536,u,u]
; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
; X86-AVX-NEXT: retl
;
@@ -1885,7 +1885,7 @@ define void @mul_2xi16_varconst3(ptr nocapture readonly %a, i64 %index) {
; X64-SSE-NEXT: movq c(%rip), %rax
; X64-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
-; X64-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [65536,65536,65536,65536]
; X64-SSE-NEXT: psllq $32, %xmm0
; X64-SSE-NEXT: movq %xmm0, (%rax,%rsi,4)
; X64-SSE-NEXT: retq
@@ -1895,7 +1895,7 @@ define void @mul_2xi16_varconst3(ptr nocapture readonly %a, i64 %index) {
; X64-AVX-NEXT: movq c(%rip), %rax
; X64-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; X64-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,65536,u,u]
; X64-AVX-NEXT: vmovq %xmm0, (%rax,%rsi,4)
; X64-AVX-NEXT: retq
entry:
@@ -1922,7 +1922,7 @@ define void @mul_2xi16_varconst4(ptr nocapture readonly %a, i64 %index) {
; X86-SSE-NEXT: movl c, %edx
; X86-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: psrad $16, %xmm0
-; X86-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [32768,32768,32768,32768]
; X86-SSE-NEXT: psllq $32, %xmm0
; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4)
; X86-SSE-NEXT: retl
@@ -1934,7 +1934,7 @@ define void @mul_2xi16_varconst4(ptr nocapture readonly %a, i64 %index) {
; X86-AVX-NEXT: movl c, %edx
; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX-NEXT: vpmovsxwd %xmm0, %xmm0
-; X86-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # [0,32768,u,u]
; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
; X86-AVX-NEXT: retl
;
@@ -1943,7 +1943,7 @@ define void @mul_2xi16_varconst4(ptr nocapture readonly %a, i64 %index) {
; X64-SSE-NEXT: movq c(%rip), %rax
; X64-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-SSE-NEXT: psrad $16, %xmm0
-; X64-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [32768,32768,32768,32768]
; X64-SSE-NEXT: psllq $32, %xmm0
; X64-SSE-NEXT: movq %xmm0, (%rax,%rsi,4)
; X64-SSE-NEXT: retq
@@ -1953,7 +1953,7 @@ define void @mul_2xi16_varconst4(ptr nocapture readonly %a, i64 %index) {
; X64-AVX-NEXT: movq c(%rip), %rax
; X64-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-AVX-NEXT: vpmovsxwd %xmm0, %xmm0
-; X64-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,32768,u,u]
; X64-AVX-NEXT: vmovq %xmm0, (%rax,%rsi,4)
; X64-AVX-NEXT: retq
entry:
diff --git a/llvm/test/CodeGen/X86/slow-pmulld.ll b/llvm/test/CodeGen/X86/slow-pmulld.ll
index 975ffd0..e8c05f9 100644
--- a/llvm/test/CodeGen/X86/slow-pmulld.ll
+++ b/llvm/test/CodeGen/X86/slow-pmulld.ll
@@ -336,13 +336,13 @@ define <4 x i32> @test_mul_v4i32_v4i16(<4 x i16> %A) {
; SSE4-32-LABEL: test_mul_v4i32_v4i16:
; SSE4-32: # %bb.0:
; SSE4-32-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE4-32-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; SSE4-32-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [18778,18778,18778,18778]
; SSE4-32-NEXT: retl
;
; SSE4-64-LABEL: test_mul_v4i32_v4i16:
; SSE4-64: # %bb.0:
; SSE4-64-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE4-64-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE4-64-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [18778,18778,18778,18778]
; SSE4-64-NEXT: retq
;
; AVX2-SLOW-LABEL: test_mul_v4i32_v4i16:
@@ -838,13 +838,13 @@ define <4 x i32> @test_mul_v4i32_v4i16_minsize(<4 x i16> %A) minsize {
; SSE-32-LABEL: test_mul_v4i32_v4i16_minsize:
; SSE-32: # %bb.0:
; SSE-32-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE-32-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; SSE-32-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [18778,18778,18778,18778]
; SSE-32-NEXT: retl
;
; SSE-64-LABEL: test_mul_v4i32_v4i16_minsize:
; SSE-64: # %bb.0:
; SSE-64-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE-64-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-64-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [18778,18778,18778,18778]
; SSE-64-NEXT: retq
;
; AVX2-LABEL: test_mul_v4i32_v4i16_minsize:
diff --git a/llvm/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll b/llvm/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll
index 2d07788..bb7245c 100644
--- a/llvm/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll
+++ b/llvm/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll
@@ -10,15 +10,15 @@ define <4 x i32> @test_srem_odd_even(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_srem_odd_even:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,3264175145,3264175145]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,u,3264175145,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,1073741824,1073741824]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -30,10 +30,10 @@ define <4 x i32> @test_srem_odd_even(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_srem_odd_even:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,3264175145,3264175145]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,1073741824,1073741824]
; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -47,10 +47,10 @@ define <4 x i32> @test_srem_odd_even(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_srem_odd_even:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,3264175145,3264175145]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,2147483648,1073741824,1073741824]
; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -63,7 +63,7 @@ define <4 x i32> @test_srem_odd_even(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_srem_odd_even:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,3264175145,3264175145]
; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -75,7 +75,7 @@ define <4 x i32> @test_srem_odd_even(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_srem_odd_even:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,3264175145,3264175145]
; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -109,7 +109,7 @@ define <4 x i32> @test_srem_odd_allones_eq(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_srem_odd_allones_eq:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,3435973837,3435973837]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993458,858993458,4294967295,858993458]
; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
@@ -119,7 +119,7 @@ define <4 x i32> @test_srem_odd_allones_eq(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_srem_odd_allones_eq:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,3435973837,3435973837]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -168,7 +168,7 @@ define <4 x i32> @test_srem_odd_allones_ne(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_srem_odd_allones_ne:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,3435973837,3435973837]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993458,858993458,4294967295,858993458]
; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
@@ -178,7 +178,7 @@ define <4 x i32> @test_srem_odd_allones_ne(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_srem_odd_allones_ne:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,3435973837,3435973837]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -234,7 +234,7 @@ define <4 x i32> @test_srem_even_allones_eq(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_srem_even_allones_eq:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,3067833783,3067833783]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm1
; CHECK-SSE41-NEXT: psrld $1, %xmm1
@@ -248,7 +248,7 @@ define <4 x i32> @test_srem_even_allones_eq(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_srem_even_allones_eq:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,3067833783,3067833783]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpsrld $1, %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpslld $31, %xmm0, %xmm0
@@ -308,7 +308,7 @@ define <4 x i32> @test_srem_even_allones_ne(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_srem_even_allones_ne:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,3067833783,3067833783]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm1
; CHECK-SSE41-NEXT: psrld $1, %xmm1
@@ -322,7 +322,7 @@ define <4 x i32> @test_srem_even_allones_ne(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_srem_even_allones_ne:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,3067833783,3067833783]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpsrld $1, %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpslld $31, %xmm0, %xmm0
@@ -367,15 +367,15 @@ define <4 x i32> @test_srem_odd_even_allones_eq(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_srem_odd_even_allones_eq:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,0,3264175145]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,u,3264175145,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,1073741824,1073741824]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -387,10 +387,10 @@ define <4 x i32> @test_srem_odd_even_allones_eq(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_srem_odd_even_allones_eq:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,0,3264175145]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,1073741824,1073741824]
; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -404,10 +404,10 @@ define <4 x i32> @test_srem_odd_even_allones_eq(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_srem_odd_even_allones_eq:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,0,3264175145]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,2147483648,1073741824,1073741824]
; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -420,7 +420,7 @@ define <4 x i32> @test_srem_odd_even_allones_eq(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_srem_odd_even_allones_eq:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,0,3264175145]
; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -432,7 +432,7 @@ define <4 x i32> @test_srem_odd_even_allones_eq(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_srem_odd_even_allones_eq:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,0,3264175145]
; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -448,15 +448,15 @@ define <4 x i32> @test_srem_odd_even_allones_ne(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_srem_odd_even_allones_ne:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,0,3264175145]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,u,3264175145,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,1073741824,1073741824]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -468,10 +468,10 @@ define <4 x i32> @test_srem_odd_even_allones_ne(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_srem_odd_even_allones_ne:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,0,3264175145]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,1073741824,1073741824]
; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -485,10 +485,10 @@ define <4 x i32> @test_srem_odd_even_allones_ne(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_srem_odd_even_allones_ne:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,0,3264175145]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,2147483648,1073741824,1073741824]
; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -501,7 +501,7 @@ define <4 x i32> @test_srem_odd_even_allones_ne(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_srem_odd_even_allones_ne:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,0,3264175145]
; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -514,7 +514,7 @@ define <4 x i32> @test_srem_odd_even_allones_ne(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_srem_odd_even_allones_ne:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,0,3264175145]
; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -534,14 +534,14 @@ define <4 x i32> @test_srem_odd_poweroftwo(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_srem_odd_poweroftwo:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,u,1,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3435973837,3435973837,3435973837,3435973837]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,u,268435456,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; CHECK-SSE2-NEXT: psrlq $32, %xmm0
@@ -553,7 +553,7 @@ define <4 x i32> @test_srem_odd_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_srem_odd_poweroftwo:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,1,3435973837]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: pmovsxdq {{.*#+}} xmm1 = [1,268435456]
; CHECK-SSE41-NEXT: pmuludq %xmm0, %xmm1
@@ -568,9 +568,9 @@ define <4 x i32> @test_srem_odd_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_srem_odd_poweroftwo:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,1,3435973837]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [1,u,268435456,u]
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
; CHECK-AVX1-NEXT: vpsrlq $32, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -581,7 +581,7 @@ define <4 x i32> @test_srem_odd_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_srem_odd_poweroftwo:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,1,3435973837]
; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -593,7 +593,7 @@ define <4 x i32> @test_srem_odd_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_srem_odd_poweroftwo:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,1,3435973837]
; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -611,9 +611,9 @@ define <4 x i32> @test_srem_even_poweroftwo(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_srem_even_poweroftwo:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,u,1,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,3067833783,3067833783,3067833783]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -636,11 +636,11 @@ define <4 x i32> @test_srem_even_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_srem_even_poweroftwo:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,1,3067833783]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,2147483648,2147483648]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,268435456,2147483648]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -654,11 +654,11 @@ define <4 x i32> @test_srem_even_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_srem_even_poweroftwo:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,1,3067833783]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,2147483648,2147483648,2147483648]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2147483648,2147483648,268435456,2147483648]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -671,7 +671,7 @@ define <4 x i32> @test_srem_even_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_srem_even_poweroftwo:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,1,3067833783]
; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -683,7 +683,7 @@ define <4 x i32> @test_srem_even_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_srem_even_poweroftwo:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,1,3067833783]
; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -701,9 +701,9 @@ define <4 x i32> @test_srem_odd_even_poweroftwo(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_srem_odd_even_poweroftwo:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,1,3264175145]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,u,3264175145,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -711,7 +711,7 @@ define <4 x i32> @test_srem_odd_even_poweroftwo(<4 x i32> %X) nounwind {
; CHECK-SSE2-NEXT: pmuludq %xmm0, %xmm1
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [2147483648,u,1073741824,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,3,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
@@ -725,11 +725,11 @@ define <4 x i32> @test_srem_odd_even_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_srem_odd_even_poweroftwo:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,1,3264175145]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,u,1073741824,u]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,2147483648,268435456,1073741824]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -743,11 +743,11 @@ define <4 x i32> @test_srem_odd_even_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_srem_odd_even_poweroftwo:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,1,3264175145]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,u,1073741824,u]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,2147483648,268435456,1073741824]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -760,7 +760,7 @@ define <4 x i32> @test_srem_odd_even_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_srem_odd_even_poweroftwo:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,1,3264175145]
; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -772,7 +772,7 @@ define <4 x i32> @test_srem_odd_even_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_srem_odd_even_poweroftwo:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,1,3264175145]
; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -806,7 +806,7 @@ define <4 x i32> @test_srem_odd_one(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_srem_odd_one:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,3435973837,3435973837]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993458,858993458,4294967295,858993458]
; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
@@ -816,7 +816,7 @@ define <4 x i32> @test_srem_odd_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_srem_odd_one:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,3435973837,3435973837]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -871,7 +871,7 @@ define <4 x i32> @test_srem_even_one(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_srem_even_one:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,3067833783,3067833783]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm1
; CHECK-SSE41-NEXT: psrld $1, %xmm1
@@ -885,7 +885,7 @@ define <4 x i32> @test_srem_even_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_srem_even_one:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,3067833783,3067833783]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpsrld $1, %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpslld $31, %xmm0, %xmm0
@@ -929,15 +929,15 @@ define <4 x i32> @test_srem_odd_even_one(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_srem_odd_even_one:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,0,3264175145]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,u,3264175145,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,1073741824,1073741824]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -949,10 +949,10 @@ define <4 x i32> @test_srem_odd_even_one(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_srem_odd_even_one:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,0,3264175145]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,1073741824,1073741824]
; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -966,10 +966,10 @@ define <4 x i32> @test_srem_odd_even_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_srem_odd_even_one:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,0,3264175145]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,2147483648,1073741824,1073741824]
; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -982,7 +982,7 @@ define <4 x i32> @test_srem_odd_even_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_srem_odd_even_one:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,0,3264175145]
; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -994,7 +994,7 @@ define <4 x i32> @test_srem_odd_even_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_srem_odd_even_one:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,0,3264175145]
; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -1018,9 +1018,9 @@ define <4 x i32> @test_srem_odd_INT_MIN(<4 x i32> %X) nounwind {
; CHECK-SSE2-NEXT: pand %xmm0, %xmm2
; CHECK-SSE2-NEXT: pcmpeqd %xmm1, %xmm2
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,u,1,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3435973837,3435973837,3435973837,3435973837]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
@@ -1039,7 +1039,7 @@ define <4 x i32> @test_srem_odd_INT_MIN(<4 x i32> %X) nounwind {
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2147483647,2147483647,2147483647,2147483647]
; CHECK-SSE41-NEXT: pand %xmm0, %xmm2
; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm2
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,1,3435973837]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993458,858993458,1,858993458]
; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
@@ -1053,7 +1053,7 @@ define <4 x i32> @test_srem_odd_INT_MIN(<4 x i32> %X) nounwind {
; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm2, %xmm1
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,1,3435973837]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
; CHECK-AVX1-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
@@ -1067,7 +1067,7 @@ define <4 x i32> @test_srem_odd_INT_MIN(<4 x i32> %X) nounwind {
; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483647,2147483647,2147483647,2147483647]
; CHECK-AVX2-NEXT: vpand %xmm2, %xmm0, %xmm2
; CHECK-AVX2-NEXT: vpcmpeqd %xmm1, %xmm2, %xmm1
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,1,3435973837]
; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
; CHECK-AVX2-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
@@ -1080,7 +1080,7 @@ define <4 x i32> @test_srem_odd_INT_MIN(<4 x i32> %X) nounwind {
; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm2
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm2, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,1,3435973837]
; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
@@ -1102,7 +1102,7 @@ define <4 x i32> @test_srem_even_INT_MIN(<4 x i32> %X) nounwind {
; CHECK-SSE2-NEXT: pmuludq %xmm0, %xmm1
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [3067833783,3067833783,3067833783,3067833783]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -1137,8 +1137,8 @@ define <4 x i32> @test_srem_even_INT_MIN(<4 x i32> %X) nounwind {
; CHECK-SSE41-NEXT: pmulld %xmm0, %xmm2
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [2147483648,2147483648,2147483648,2147483648]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [2147483648,2147483648,2,2147483648]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm3[2,3],xmm4[4,5],xmm3[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,2,2]
@@ -1156,11 +1156,11 @@ define <4 x i32> @test_srem_even_INT_MIN(<4 x i32> %X) nounwind {
; CHECK-AVX1-LABEL: test_srem_even_INT_MIN:
; CHECK-AVX1: # %bb.0:
; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2 # [3067833783,3067833783,1,3067833783]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [2147483648,2147483648,2147483648,2147483648]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [2147483648,2147483648,2,2147483648]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm3[2,3],xmm4[4,5],xmm3[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,0,2,2]
@@ -1177,7 +1177,7 @@ define <4 x i32> @test_srem_even_INT_MIN(<4 x i32> %X) nounwind {
; CHECK-AVX2-LABEL: test_srem_even_INT_MIN:
; CHECK-AVX2: # %bb.0:
; CHECK-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2 # [3067833783,3067833783,1,3067833783]
; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
@@ -1196,7 +1196,7 @@ define <4 x i32> @test_srem_even_INT_MIN(<4 x i32> %X) nounwind {
; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm2
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm2, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,1,3067833783]
; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
@@ -1219,7 +1219,7 @@ define <4 x i32> @test_srem_odd_even_INT_MIN(<4 x i32> %X) nounwind {
; CHECK-SSE2-NEXT: pmuludq %xmm0, %xmm1
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [3067833783,u,3264175145,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -1227,7 +1227,7 @@ define <4 x i32> @test_srem_odd_even_INT_MIN(<4 x i32> %X) nounwind {
; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm3
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,3,2,3]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,u,1073741824,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,3,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
@@ -1253,8 +1253,8 @@ define <4 x i32> @test_srem_odd_even_INT_MIN(<4 x i32> %X) nounwind {
; CHECK-SSE41-NEXT: pmulld %xmm0, %xmm2
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [2147483648,u,1073741824,u]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [1,2147483648,2,1073741824]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm3[2,3],xmm4[4,5],xmm3[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,2,2]
@@ -1272,11 +1272,11 @@ define <4 x i32> @test_srem_odd_even_INT_MIN(<4 x i32> %X) nounwind {
; CHECK-AVX1-LABEL: test_srem_odd_even_INT_MIN:
; CHECK-AVX1: # %bb.0:
; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2 # [3435973837,3067833783,1,3264175145]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [2147483648,u,1073741824,u]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [1,2147483648,2,1073741824]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm3[2,3],xmm4[4,5],xmm3[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,0,2,2]
@@ -1293,7 +1293,7 @@ define <4 x i32> @test_srem_odd_even_INT_MIN(<4 x i32> %X) nounwind {
; CHECK-AVX2-LABEL: test_srem_odd_even_INT_MIN:
; CHECK-AVX2: # %bb.0:
; CHECK-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2 # [3435973837,3067833783,1,3264175145]
; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
@@ -1312,7 +1312,7 @@ define <4 x i32> @test_srem_odd_even_INT_MIN(<4 x i32> %X) nounwind {
; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm2
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm2, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,1,3264175145]
; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
@@ -1333,14 +1333,14 @@ define <4 x i32> @test_srem_odd_allones_and_poweroftwo(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_srem_odd_allones_and_poweroftwo:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,0,1,3435973837]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [0,u,3435973837,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,u,268435456,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; CHECK-SSE2-NEXT: psrlq $32, %xmm0
@@ -1352,7 +1352,7 @@ define <4 x i32> @test_srem_odd_allones_and_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_srem_odd_allones_and_poweroftwo:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,0,1,3435973837]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: pmovsxdq {{.*#+}} xmm1 = [1,268435456]
; CHECK-SSE41-NEXT: pmuludq %xmm0, %xmm1
@@ -1367,9 +1367,9 @@ define <4 x i32> @test_srem_odd_allones_and_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_srem_odd_allones_and_poweroftwo:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,0,1,3435973837]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [1,u,268435456,u]
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
; CHECK-AVX1-NEXT: vpsrlq $32, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1380,7 +1380,7 @@ define <4 x i32> @test_srem_odd_allones_and_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_srem_odd_allones_and_poweroftwo:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,0,1,3435973837]
; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -1392,7 +1392,7 @@ define <4 x i32> @test_srem_odd_allones_and_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_srem_odd_allones_and_poweroftwo:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,0,1,3435973837]
; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -1410,9 +1410,9 @@ define <4 x i32> @test_srem_even_allones_and_poweroftwo(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_srem_even_allones_and_poweroftwo:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,0,1,3067833783]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [0,u,3067833783,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -1420,7 +1420,7 @@ define <4 x i32> @test_srem_even_allones_and_poweroftwo(<4 x i32> %X) nounwind {
; CHECK-SSE2-NEXT: pmuludq %xmm0, %xmm1
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [1,u,2147483648,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,3,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
@@ -1434,11 +1434,11 @@ define <4 x i32> @test_srem_even_allones_and_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_srem_even_allones_and_poweroftwo:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,0,1,3067833783]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,2147483648,u]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,1,268435456,2147483648]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1452,11 +1452,11 @@ define <4 x i32> @test_srem_even_allones_and_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_srem_even_allones_and_poweroftwo:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,0,1,3067833783]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [1,u,2147483648,u]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2147483648,1,268435456,2147483648]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1469,7 +1469,7 @@ define <4 x i32> @test_srem_even_allones_and_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_srem_even_allones_and_poweroftwo:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,0,1,3067833783]
; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -1481,7 +1481,7 @@ define <4 x i32> @test_srem_even_allones_and_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_srem_even_allones_and_poweroftwo:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,0,1,3067833783]
; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -1499,9 +1499,9 @@ define <4 x i32> @test_srem_odd_even_allones_and_poweroftwo(<4 x i32> %X) nounwi
; CHECK-SSE2-LABEL: test_srem_odd_even_allones_and_poweroftwo:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,0,1,3264175145]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [0,u,3264175145,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -1509,7 +1509,7 @@ define <4 x i32> @test_srem_odd_even_allones_and_poweroftwo(<4 x i32> %X) nounwi
; CHECK-SSE2-NEXT: pmuludq %xmm0, %xmm1
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [1,u,1073741824,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,3,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
@@ -1523,11 +1523,11 @@ define <4 x i32> @test_srem_odd_even_allones_and_poweroftwo(<4 x i32> %X) nounwi
;
; CHECK-SSE41-LABEL: test_srem_odd_even_allones_and_poweroftwo:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,0,1,3264175145]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,1073741824,u]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,1,268435456,1073741824]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1541,11 +1541,11 @@ define <4 x i32> @test_srem_odd_even_allones_and_poweroftwo(<4 x i32> %X) nounwi
;
; CHECK-AVX1-LABEL: test_srem_odd_even_allones_and_poweroftwo:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,0,1,3264175145]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [1,u,1073741824,u]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,1,268435456,1073741824]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1558,7 +1558,7 @@ define <4 x i32> @test_srem_odd_even_allones_and_poweroftwo(<4 x i32> %X) nounwi
;
; CHECK-AVX2-LABEL: test_srem_odd_even_allones_and_poweroftwo:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,0,1,3264175145]
; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -1570,7 +1570,7 @@ define <4 x i32> @test_srem_odd_even_allones_and_poweroftwo(<4 x i32> %X) nounwi
;
; CHECK-AVX512VL-LABEL: test_srem_odd_even_allones_and_poweroftwo:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,0,1,3264175145]
; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -1604,7 +1604,7 @@ define <4 x i32> @test_srem_odd_allones_and_one(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_srem_odd_allones_and_one:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,3435973837,3435973837]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993458,4294967295,4294967295,858993458]
; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
@@ -1614,7 +1614,7 @@ define <4 x i32> @test_srem_odd_allones_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_srem_odd_allones_and_one:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,3435973837,3435973837]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -1669,7 +1669,7 @@ define <4 x i32> @test_srem_even_allones_and_one(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_srem_even_allones_and_one:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,3067833783,3067833783]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm1
; CHECK-SSE41-NEXT: psrld $1, %xmm1
@@ -1683,7 +1683,7 @@ define <4 x i32> @test_srem_even_allones_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_srem_even_allones_and_one:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,3067833783,3067833783]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpsrld $1, %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpslld $31, %xmm0, %xmm0
@@ -1727,15 +1727,15 @@ define <4 x i32> @test_srem_odd_even_allones_and_one(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_srem_odd_even_allones_and_one:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,0,0,3264175145]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [0,u,3264175145,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,1,1073741824,1073741824]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -1747,10 +1747,10 @@ define <4 x i32> @test_srem_odd_even_allones_and_one(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_srem_odd_even_allones_and_one:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,0,0,3264175145]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,1,1073741824,1073741824]
; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1764,10 +1764,10 @@ define <4 x i32> @test_srem_odd_even_allones_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_srem_odd_even_allones_and_one:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,0,0,3264175145]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [1,1,1073741824,1073741824]
; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1780,7 +1780,7 @@ define <4 x i32> @test_srem_odd_even_allones_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_srem_odd_even_allones_and_one:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,0,0,3264175145]
; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -1792,7 +1792,7 @@ define <4 x i32> @test_srem_odd_even_allones_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_srem_odd_even_allones_and_one:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,0,0,3264175145]
; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -1812,15 +1812,15 @@ define <4 x i32> @test_srem_odd_poweroftwo_and_one(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_srem_odd_poweroftwo_and_one:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,1,0,3435973837]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,3435973837,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [268435456,268435456,1,1]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -1832,10 +1832,10 @@ define <4 x i32> @test_srem_odd_poweroftwo_and_one(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_srem_odd_poweroftwo_and_one:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,1,0,3435973837]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [268435456,268435456,1,1]
; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1849,10 +1849,10 @@ define <4 x i32> @test_srem_odd_poweroftwo_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_srem_odd_poweroftwo_and_one:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,1,0,3435973837]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [268435456,268435456,1,1]
; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1865,7 +1865,7 @@ define <4 x i32> @test_srem_odd_poweroftwo_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_srem_odd_poweroftwo_and_one:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,1,0,3435973837]
; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -1877,7 +1877,7 @@ define <4 x i32> @test_srem_odd_poweroftwo_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_srem_odd_poweroftwo_and_one:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,1,0,3435973837]
; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -1895,9 +1895,9 @@ define <4 x i32> @test_srem_even_poweroftwo_and_one(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_srem_even_poweroftwo_and_one:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,1,0,3067833783]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,3067833783,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -1905,7 +1905,7 @@ define <4 x i32> @test_srem_even_poweroftwo_and_one(<4 x i32> %X) nounwind {
; CHECK-SSE2-NEXT: pmuludq %xmm0, %xmm1
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [268435456,u,2147483648,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,3,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
@@ -1919,11 +1919,11 @@ define <4 x i32> @test_srem_even_poweroftwo_and_one(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_srem_even_poweroftwo_and_one:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,1,0,3067833783]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [268435456,u,2147483648,u]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,268435456,1,2147483648]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1937,11 +1937,11 @@ define <4 x i32> @test_srem_even_poweroftwo_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_srem_even_poweroftwo_and_one:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,1,0,3067833783]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [268435456,u,2147483648,u]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2147483648,268435456,1,2147483648]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1954,7 +1954,7 @@ define <4 x i32> @test_srem_even_poweroftwo_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_srem_even_poweroftwo_and_one:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,1,0,3067833783]
; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -1966,7 +1966,7 @@ define <4 x i32> @test_srem_even_poweroftwo_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_srem_even_poweroftwo_and_one:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,1,0,3067833783]
; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -1984,15 +1984,15 @@ define <4 x i32> @test_srem_odd_even_poweroftwo_and_one(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_srem_odd_even_poweroftwo_and_one:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,1,0,3264175145]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,3264175145,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [268435456,268435456,1073741824,1073741824]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -2004,10 +2004,10 @@ define <4 x i32> @test_srem_odd_even_poweroftwo_and_one(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_srem_odd_even_poweroftwo_and_one:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,1,0,3264175145]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [268435456,268435456,1073741824,1073741824]
; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -2021,10 +2021,10 @@ define <4 x i32> @test_srem_odd_even_poweroftwo_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_srem_odd_even_poweroftwo_and_one:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,1,0,3264175145]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [268435456,268435456,1073741824,1073741824]
; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -2037,7 +2037,7 @@ define <4 x i32> @test_srem_odd_even_poweroftwo_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_srem_odd_even_poweroftwo_and_one:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,1,0,3264175145]
; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -2049,7 +2049,7 @@ define <4 x i32> @test_srem_odd_even_poweroftwo_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_srem_odd_even_poweroftwo_and_one:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,1,0,3264175145]
; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -2067,9 +2067,9 @@ define <4 x i32> @test_srem_odd_even_poweroftwo_and_one(<4 x i32> %X) nounwind {
define <4 x i32> @test_srem_odd_allones_and_poweroftwo_and_one(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_srem_odd_allones_and_poweroftwo_and_one:
; CHECK-SSE2: # %bb.0:
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,u,1,u]
; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,u,268435456,u]
; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm1
; CHECK-SSE2-NEXT: psrlq $32, %xmm1
; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -2081,9 +2081,9 @@ define <4 x i32> @test_srem_odd_allones_and_poweroftwo_and_one(<4 x i32> %X) nou
;
; CHECK-SSE41-LABEL: test_srem_odd_allones_and_poweroftwo_and_one:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,u,1,u]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,u,268435456,u]
; CHECK-SSE41-NEXT: pxor %xmm1, %xmm1
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: psrlq $32, %xmm0
@@ -2096,9 +2096,9 @@ define <4 x i32> @test_srem_odd_allones_and_poweroftwo_and_one(<4 x i32> %X) nou
;
; CHECK-AVX1-LABEL: test_srem_odd_allones_and_poweroftwo_and_one:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,u,1,u]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,u,268435456,u]
; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpsrlq $32, %xmm0, %xmm0
@@ -2110,7 +2110,7 @@ define <4 x i32> @test_srem_odd_allones_and_poweroftwo_and_one(<4 x i32> %X) nou
;
; CHECK-AVX2-LABEL: test_srem_odd_allones_and_poweroftwo_and_one:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,0,1,0]
; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -2122,7 +2122,7 @@ define <4 x i32> @test_srem_odd_allones_and_poweroftwo_and_one(<4 x i32> %X) nou
;
; CHECK-AVX512VL-LABEL: test_srem_odd_allones_and_poweroftwo_and_one:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,0,1,0]
; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -2138,9 +2138,9 @@ define <4 x i32> @test_srem_odd_allones_and_poweroftwo_and_one(<4 x i32> %X) nou
define <4 x i32> @test_srem_even_allones_and_poweroftwo_and_one(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_srem_even_allones_and_poweroftwo_and_one:
; CHECK-SSE2: # %bb.0:
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,u,1,u]
; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,u,268435456,u]
; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm1
; CHECK-SSE2-NEXT: psrlq $32, %xmm1
; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -2152,9 +2152,9 @@ define <4 x i32> @test_srem_even_allones_and_poweroftwo_and_one(<4 x i32> %X) no
;
; CHECK-SSE41-LABEL: test_srem_even_allones_and_poweroftwo_and_one:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,u,1,u]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,u,268435456,u]
; CHECK-SSE41-NEXT: pxor %xmm1, %xmm1
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: psrlq $32, %xmm0
@@ -2167,9 +2167,9 @@ define <4 x i32> @test_srem_even_allones_and_poweroftwo_and_one(<4 x i32> %X) no
;
; CHECK-AVX1-LABEL: test_srem_even_allones_and_poweroftwo_and_one:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,u,1,u]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2147483648,u,268435456,u]
; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpsrlq $32, %xmm0, %xmm0
@@ -2181,7 +2181,7 @@ define <4 x i32> @test_srem_even_allones_and_poweroftwo_and_one(<4 x i32> %X) no
;
; CHECK-AVX2-LABEL: test_srem_even_allones_and_poweroftwo_and_one:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,0,1,0]
; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -2193,7 +2193,7 @@ define <4 x i32> @test_srem_even_allones_and_poweroftwo_and_one(<4 x i32> %X) no
;
; CHECK-AVX512VL-LABEL: test_srem_even_allones_and_poweroftwo_and_one:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,0,1,0]
; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -2335,10 +2335,10 @@ define <32 x i1> @pr51133(<32 x i8> %x, <32 x i8> %y) {
; CHECK-AVX1: # %bb.0:
; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; CHECK-AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [34048,34048,26368,37632,21760,33024,22016,35072]
+; CHECK-AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [0,133,0,133,0,103,0,147,0,85,0,129,0,86,0,137]
; CHECK-AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; CHECK-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; CHECK-AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [20224,26368,6912,30976,33024,33024,33024,12032]
+; CHECK-AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [0,79,0,103,0,27,0,121,0,129,0,129,0,129,0,47]
; CHECK-AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
; CHECK-AVX1-NEXT: vpackuswb %xmm3, %xmm4, %xmm4
; CHECK-AVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm5 # [0,0,0,0,1,0,1,0,1,0,0,0,0,0,0,0]
@@ -2369,10 +2369,10 @@ define <32 x i1> @pr51133(<32 x i8> %x, <32 x i8> %y) {
; CHECK-AVX1-NEXT: vpsubb %xmm4, %xmm0, %xmm4
; CHECK-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; CHECK-AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6, %xmm6 # [2304,0,10496,37632,33024,33024,21760,36096]
+; CHECK-AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6, %xmm6 # [0,9,0,0,0,41,0,147,0,129,0,129,0,85,0,141]
; CHECK-AVX1-NEXT: vpsrlw $8, %xmm6, %xmm6
; CHECK-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; CHECK-AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm7, %xmm7 # [22016,24320,37632,11008,12544,32512,16640,37632]
+; CHECK-AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm7, %xmm7 # [0,86,0,95,0,147,0,43,0,49,0,127,0,65,0,147]
; CHECK-AVX1-NEXT: vpsrlw $8, %xmm7, %xmm7
; CHECK-AVX1-NEXT: vpackuswb %xmm6, %xmm7, %xmm6
; CHECK-AVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm7 # [0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0]
@@ -2417,10 +2417,10 @@ define <32 x i1> @pr51133(<32 x i8> %x, <32 x i8> %y) {
; CHECK-AVX2: # %bb.0:
; CHECK-AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-AVX2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31]
-; CHECK-AVX2-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [34048,34048,26368,37632,21760,33024,22016,35072,2304,0,10496,37632,33024,33024,21760,36096]
+; CHECK-AVX2-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [0,133,0,133,0,103,0,147,0,85,0,129,0,86,0,137,0,9,0,0,0,41,0,147,0,129,0,129,0,85,0,141]
; CHECK-AVX2-NEXT: vpsrlw $8, %ymm3, %ymm3
; CHECK-AVX2-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
-; CHECK-AVX2-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [20224,26368,6912,30976,33024,33024,33024,12032,22016,24320,37632,11008,12544,32512,16640,37632]
+; CHECK-AVX2-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [0,79,0,103,0,27,0,121,0,129,0,129,0,129,0,47,0,86,0,95,0,147,0,43,0,49,0,127,0,65,0,147]
; CHECK-AVX2-NEXT: vpsrlw $8, %ymm4, %ymm4
; CHECK-AVX2-NEXT: vpackuswb %ymm3, %ymm4, %ymm3
; CHECK-AVX2-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm4 # [0,0,0,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0]
diff --git a/llvm/test/CodeGen/X86/srem-seteq-vec-splat.ll b/llvm/test/CodeGen/X86/srem-seteq-vec-splat.ll
index 3359202..d459d01 100644
--- a/llvm/test/CodeGen/X86/srem-seteq-vec-splat.ll
+++ b/llvm/test/CodeGen/X86/srem-seteq-vec-splat.ll
@@ -24,7 +24,7 @@ define <4 x i32> @test_srem_odd_25(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_srem_odd_25:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3264175145,3264175145,3264175145,3264175145]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [171798690,171798690,171798690,171798690]
; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
@@ -34,7 +34,7 @@ define <4 x i32> @test_srem_odd_25(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_srem_odd_25:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3264175145,3264175145,3264175145,3264175145]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -90,7 +90,7 @@ define <4 x i32> @test_srem_even_100(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_srem_even_100:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3264175145,3264175145,3264175145,3264175145]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm1
; CHECK-SSE41-NEXT: psrld $2, %xmm1
@@ -104,7 +104,7 @@ define <4 x i32> @test_srem_even_100(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_srem_even_100:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3264175145,3264175145,3264175145,3264175145]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpsrld $2, %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpslld $30, %xmm0, %xmm0
@@ -165,7 +165,7 @@ define <4 x i32> @test_srem_odd_neg25(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_srem_odd_neg25:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3264175145,3264175145,3264175145,3264175145]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [171798690,171798690,171798690,171798690]
; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
@@ -175,7 +175,7 @@ define <4 x i32> @test_srem_odd_neg25(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_srem_odd_neg25:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3264175145,3264175145,3264175145,3264175145]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -231,7 +231,7 @@ define <4 x i32> @test_srem_even_neg100(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_srem_even_neg100:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3264175145,3264175145,3264175145,3264175145]
; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm1
; CHECK-SSE41-NEXT: psrld $2, %xmm1
@@ -245,7 +245,7 @@ define <4 x i32> @test_srem_even_neg100(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_srem_even_neg100:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3264175145,3264175145,3264175145,3264175145]
; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpsrld $2, %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpslld $30, %xmm0, %xmm0
@@ -333,7 +333,7 @@ define <4 x i32> @test_srem_odd_undef1(<4 x i32> %X) nounwind {
; CHECK-SSE41-NEXT: psrld $31, %xmm1
; CHECK-SSE41-NEXT: psrad $3, %xmm2
; CHECK-SSE41-NEXT: paddd %xmm1, %xmm2
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [25,25,25,25]
; CHECK-SSE41-NEXT: psubd %xmm2, %xmm0
; CHECK-SSE41-NEXT: pxor %xmm1, %xmm1
; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
@@ -351,7 +351,7 @@ define <4 x i32> @test_srem_odd_undef1(<4 x i32> %X) nounwind {
; CHECK-AVX1-NEXT: vpsrld $31, %xmm1, %xmm2
; CHECK-AVX1-NEXT: vpsrad $3, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [25,25,25,25]
; CHECK-AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -444,7 +444,7 @@ define <4 x i32> @test_srem_even_undef1(<4 x i32> %X) nounwind {
; CHECK-SSE41-NEXT: psrld $31, %xmm1
; CHECK-SSE41-NEXT: psrad $5, %xmm2
; CHECK-SSE41-NEXT: paddd %xmm1, %xmm2
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [100,100,100,100]
; CHECK-SSE41-NEXT: psubd %xmm2, %xmm0
; CHECK-SSE41-NEXT: pxor %xmm1, %xmm1
; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
@@ -462,7 +462,7 @@ define <4 x i32> @test_srem_even_undef1(<4 x i32> %X) nounwind {
; CHECK-AVX1-NEXT: vpsrld $31, %xmm1, %xmm2
; CHECK-AVX1-NEXT: vpsrad $5, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [100,100,100,100]
; CHECK-AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/udiv-exact.ll b/llvm/test/CodeGen/X86/udiv-exact.ll
index 271d11e..2b3f26a 100644
--- a/llvm/test/CodeGen/X86/udiv-exact.ll
+++ b/llvm/test/CodeGen/X86/udiv-exact.ll
@@ -87,7 +87,7 @@ define <4 x i32> @test5(<4 x i32> %x) {
; X86-NEXT: pmuludq %xmm1, %xmm0
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [2863311531,u,3264175145,u]
; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-NEXT: retl
@@ -95,7 +95,7 @@ define <4 x i32> @test5(<4 x i32> %x) {
; X64-LABEL: test5:
; X64: # %bb.0:
; X64-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2863311531,2863311531,3264175145,3264175145]
; X64-NEXT: retq
%div = udiv exact <4 x i32> %x, <i32 24, i32 24, i32 25, i32 25>
ret <4 x i32> %div
@@ -112,7 +112,7 @@ define <4 x i32> @test6(<4 x i32> %x) {
; X86-NEXT: pmuludq %xmm0, %xmm1
; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [2863311531,u,3303820997,u]
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X86-NEXT: movdqa %xmm1, %xmm0
@@ -121,7 +121,7 @@ define <4 x i32> @test6(<4 x i32> %x) {
; X64-LABEL: test6:
; X64: # %bb.0:
; X64-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2863311531,2863311531,3303820997,3303820997]
; X64-NEXT: retq
%div = udiv exact <4 x i32> %x, <i32 24, i32 24, i32 26, i32 26>
ret <4 x i32> %div
@@ -131,16 +131,16 @@ define <4 x i32> @test7(<4 x i32> %x) {
; X86-LABEL: test7:
; X86: # %bb.0:
; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [3264175145,3264175145,1749801491,1749801491]
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [3264175145,u,1749801491,u]
; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-NEXT: retl
;
; X64-LABEL: test7:
; X64: # %bb.0:
-; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3264175145,3264175145,1749801491,1749801491]
; X64-NEXT: retq
%div = udiv exact <4 x i32> %x, <i32 25, i32 25, i32 27, i32 27>
ret <4 x i32> %div
@@ -156,7 +156,7 @@ define <4 x i32> @test8(<4 x i32> %x) {
; X86-NEXT: pmuludq %xmm1, %xmm0
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [1,u,2863311531,u]
; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-NEXT: retl
@@ -164,7 +164,7 @@ define <4 x i32> @test8(<4 x i32> %x) {
; X64-LABEL: test8:
; X64: # %bb.0:
; X64-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,1,2863311531,2863311531]
; X64-NEXT: retq
%div = udiv exact <4 x i32> %x, <i32 1, i32 1, i32 24, i32 24>
ret <4 x i32> %div
diff --git a/llvm/test/CodeGen/X86/undo-mul-and.ll b/llvm/test/CodeGen/X86/undo-mul-and.ll
index c9c40099..6566153 100644
--- a/llvm/test/CodeGen/X86/undo-mul-and.ll
+++ b/llvm/test/CodeGen/X86/undo-mul-and.ll
@@ -63,9 +63,9 @@ define <4 x i32> @mul_and_to_neg_shl_and_vec_fail_no_splat(<4 x i32> %x) {
; CHECK-SSE-LABEL: mul_and_to_neg_shl_and_vec_fail_no_splat:
; CHECK-SSE: # %bb.0:
; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [56,56,56,64]
; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [56,u,64,u]
; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -73,13 +73,13 @@ define <4 x i32> @mul_and_to_neg_shl_and_vec_fail_no_splat(<4 x i32> %x) {
;
; CHECK-AVX1-LABEL: mul_and_to_neg_shl_and_vec_fail_no_splat:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [56,56,56,64]
; CHECK-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: retq
;
; CHECK-AVX512-LABEL: mul_and_to_neg_shl_and_vec_fail_no_splat:
; CHECK-AVX512: # %bb.0:
-; CHECK-AVX512-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [56,56,56,64]
; CHECK-AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; CHECK-AVX512-NEXT: retq
%mul = mul <4 x i32> %x, <i32 56, i32 56, i32 56, i32 64>
@@ -92,9 +92,9 @@ define <4 x i32> @mul_and_to_neg_shl_and_vec_todo_no_splat1(<4 x i32> %x) {
; CHECK-SSE-LABEL: mul_and_to_neg_shl_and_vec_todo_no_splat1:
; CHECK-SSE: # %bb.0:
; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [56,56,56,48]
; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [56,u,48,u]
; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -102,13 +102,13 @@ define <4 x i32> @mul_and_to_neg_shl_and_vec_todo_no_splat1(<4 x i32> %x) {
;
; CHECK-AVX1-LABEL: mul_and_to_neg_shl_and_vec_todo_no_splat1:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [56,56,56,48]
; CHECK-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: retq
;
; CHECK-AVX512-LABEL: mul_and_to_neg_shl_and_vec_todo_no_splat1:
; CHECK-AVX512: # %bb.0:
-; CHECK-AVX512-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [56,56,56,48]
; CHECK-AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; CHECK-AVX512-NEXT: retq
%mul = mul <4 x i32> %x, <i32 56, i32 56, i32 56, i32 48>
@@ -131,7 +131,7 @@ define <4 x i32> @mul_and_to_neg_shl_and_vec_todo_no_splat2(<4 x i32> %x) {
;
; CHECK-AVX1-LABEL: mul_and_to_neg_shl_and_vec_todo_no_splat2:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [56,56,56,56]
; CHECK-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/X86/urem-seteq-illegal-types.ll
index 874d885..759055d 100644
--- a/llvm/test/CodeGen/X86/urem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/X86/urem-seteq-illegal-types.ll
@@ -167,7 +167,7 @@ define <3 x i1> @test_urem_vec(<3 x i11> %X) nounwind {
; SSE41-NEXT: pinsrd $1, %esi, %xmm0
; SSE41-NEXT: pinsrd $2, %edx, %xmm0
; SSE41-NEXT: psubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [683,1463,819,u]
; SSE41-NEXT: pmovsxwd {{.*#+}} xmm1 = [2047,2047,2047,2047]
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pand %xmm1, %xmm2
@@ -193,7 +193,7 @@ define <3 x i1> @test_urem_vec(<3 x i11> %X) nounwind {
; AVX1-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0
; AVX1-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0
; AVX1-NEXT: vpsubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [683,1463,819,u]
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm1 = [2047,2047,2047,2047]
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
@@ -218,7 +218,7 @@ define <3 x i1> @test_urem_vec(<3 x i11> %X) nounwind {
; AVX2-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0
; AVX2-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0
; AVX2-NEXT: vpsubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [683,1463,819,u]
; AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2047,2047,2047,2047]
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -240,7 +240,7 @@ define <3 x i1> @test_urem_vec(<3 x i11> %X) nounwind {
; AVX512VL-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0
; AVX512VL-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0
; AVX512VL-NEXT: vpsubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [683,1463,819,u]
; AVX512VL-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2047,2047,2047,2047]
; AVX512VL-NEXT: vpand %xmm2, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/urem-seteq-vec-nonsplat.ll b/llvm/test/CodeGen/X86/urem-seteq-vec-nonsplat.ll
index 838086e..2228c09 100644
--- a/llvm/test/CodeGen/X86/urem-seteq-vec-nonsplat.ll
+++ b/llvm/test/CodeGen/X86/urem-seteq-vec-nonsplat.ll
@@ -10,10 +10,10 @@ define <4 x i32> @test_urem_odd_even(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_urem_odd_even:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,3264175145,3264175145]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,u,3264175145,u]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,1073741824,1073741824]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -26,9 +26,9 @@ define <4 x i32> @test_urem_odd_even(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_odd_even:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,3264175145,3264175145]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,1073741824,1073741824]
; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -42,9 +42,9 @@ define <4 x i32> @test_urem_odd_even(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_urem_odd_even:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,3264175145,3264175145]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,2147483648,1073741824,1073741824]
; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -57,7 +57,7 @@ define <4 x i32> @test_urem_odd_even(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_urem_odd_even:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,3264175145,3264175145]
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -68,7 +68,7 @@ define <4 x i32> @test_urem_odd_even(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_urem_odd_even:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,3264175145,3264175145]
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -87,9 +87,9 @@ define <4 x i32> @test_urem_odd_allones_eq(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_urem_odd_allones_eq:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,4294967295,3435973837]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3435973837,3435973837,3435973837,3435973837]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE2-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -99,7 +99,7 @@ define <4 x i32> @test_urem_odd_allones_eq(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_odd_allones_eq:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,4294967295,3435973837]
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993459,858993459,1,858993459]
; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
@@ -108,7 +108,7 @@ define <4 x i32> @test_urem_odd_allones_eq(<4 x i32> %X) nounwind {
;
; CHECK-AVX-LABEL: test_urem_odd_allones_eq:
; CHECK-AVX: # %bb.0:
-; CHECK-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,4294967295,3435973837]
; CHECK-AVX-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX-NEXT: vpsrld $31, %xmm0, %xmm0
@@ -122,9 +122,9 @@ define <4 x i32> @test_urem_odd_allones_ne(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_urem_odd_allones_ne:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,4294967295,3435973837]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3435973837,3435973837,3435973837,3435973837]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE2-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -134,7 +134,7 @@ define <4 x i32> @test_urem_odd_allones_ne(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_odd_allones_ne:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,4294967295,3435973837]
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993460,858993460,2,858993460]
; CHECK-SSE41-NEXT: pmaxud %xmm0, %xmm1
; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
@@ -143,7 +143,7 @@ define <4 x i32> @test_urem_odd_allones_ne(<4 x i32> %X) nounwind {
;
; CHECK-AVX-LABEL: test_urem_odd_allones_ne:
; CHECK-AVX: # %bb.0:
-; CHECK-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,4294967295,3435973837]
; CHECK-AVX-NEXT: vpmaxud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX-NEXT: vpsrld $31, %xmm0, %xmm0
@@ -159,12 +159,12 @@ define <4 x i32> @test_urem_even_allones_eq(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_urem_even_allones_eq:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,3067833783,3067833783,3067833783]
; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; CHECK-SSE2-NEXT: pmuludq %xmm2, %xmm1
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,4294967295,3067833783]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,1,2147483648]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,3,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -178,10 +178,10 @@ define <4 x i32> @test_urem_even_allones_eq(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_even_allones_eq:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,4294967295,3067833783]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,2147483648,2147483648]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,1,2147483648]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -195,10 +195,10 @@ define <4 x i32> @test_urem_even_allones_eq(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_urem_even_allones_eq:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,4294967295,3067833783]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,2147483648,2147483648,2147483648]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2147483648,2147483648,1,2147483648]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -211,7 +211,7 @@ define <4 x i32> @test_urem_even_allones_eq(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_urem_even_allones_eq:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,4294967295,3067833783]
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -222,7 +222,7 @@ define <4 x i32> @test_urem_even_allones_eq(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_urem_even_allones_eq:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,4294967295,3067833783]
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -237,12 +237,12 @@ define <4 x i32> @test_urem_even_allones_ne(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_urem_even_allones_ne:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,3067833783,3067833783,3067833783]
; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; CHECK-SSE2-NEXT: pmuludq %xmm2, %xmm1
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,4294967295,3067833783]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,1,2147483648]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,3,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -256,10 +256,10 @@ define <4 x i32> @test_urem_even_allones_ne(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_even_allones_ne:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,4294967295,3067833783]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,2147483648,2147483648]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,1,2147483648]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -273,10 +273,10 @@ define <4 x i32> @test_urem_even_allones_ne(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_urem_even_allones_ne:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,4294967295,3067833783]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,2147483648,2147483648,2147483648]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2147483648,2147483648,1,2147483648]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -289,7 +289,7 @@ define <4 x i32> @test_urem_even_allones_ne(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_urem_even_allones_ne:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,4294967295,3067833783]
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -300,7 +300,7 @@ define <4 x i32> @test_urem_even_allones_ne(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_urem_even_allones_ne:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,4294967295,3067833783]
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpmaxud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -317,10 +317,10 @@ define <4 x i32> @test_urem_odd_even_allones_eq(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_urem_odd_even_allones_eq:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,4294967295,3264175145]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,u,3264175145,u]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,1073741824,1073741824]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -333,9 +333,9 @@ define <4 x i32> @test_urem_odd_even_allones_eq(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_odd_even_allones_eq:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,4294967295,3264175145]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,1073741824,1073741824]
; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -349,9 +349,9 @@ define <4 x i32> @test_urem_odd_even_allones_eq(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_urem_odd_even_allones_eq:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,4294967295,3264175145]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,2147483648,1073741824,1073741824]
; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -364,7 +364,7 @@ define <4 x i32> @test_urem_odd_even_allones_eq(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_urem_odd_even_allones_eq:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,4294967295,3264175145]
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -375,7 +375,7 @@ define <4 x i32> @test_urem_odd_even_allones_eq(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_urem_odd_even_allones_eq:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,4294967295,3264175145]
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -390,10 +390,10 @@ define <4 x i32> @test_urem_odd_even_allones_ne(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_urem_odd_even_allones_ne:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,4294967295,3264175145]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,u,3264175145,u]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,1073741824,1073741824]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -406,9 +406,9 @@ define <4 x i32> @test_urem_odd_even_allones_ne(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_odd_even_allones_ne:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,4294967295,3264175145]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,1073741824,1073741824]
; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -422,9 +422,9 @@ define <4 x i32> @test_urem_odd_even_allones_ne(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_urem_odd_even_allones_ne:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,4294967295,3264175145]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,2147483648,1073741824,1073741824]
; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -437,7 +437,7 @@ define <4 x i32> @test_urem_odd_even_allones_ne(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_urem_odd_even_allones_ne:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,4294967295,3264175145]
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -448,7 +448,7 @@ define <4 x i32> @test_urem_odd_even_allones_ne(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_urem_odd_even_allones_ne:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,4294967295,3264175145]
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpmaxud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -467,10 +467,10 @@ define <4 x i32> @test_urem_odd_poweroftwo(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_urem_odd_poweroftwo:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3435973837,3435973837,3435973837,3435973837]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,1,3435973837]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,u,268435456,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; CHECK-SSE2-NEXT: psrlq $32, %xmm0
@@ -482,7 +482,7 @@ define <4 x i32> @test_urem_odd_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_odd_poweroftwo:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,1,3435973837]
; CHECK-SSE41-NEXT: pmovsxdq {{.*#+}} xmm1 = [1,268435456]
; CHECK-SSE41-NEXT: pmuludq %xmm0, %xmm1
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
@@ -496,8 +496,8 @@ define <4 x i32> @test_urem_odd_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_urem_odd_poweroftwo:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,1,3435973837]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [1,u,268435456,u]
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
; CHECK-AVX1-NEXT: vpsrlq $32, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -508,7 +508,7 @@ define <4 x i32> @test_urem_odd_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_urem_odd_poweroftwo:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,1,3435973837]
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -519,7 +519,7 @@ define <4 x i32> @test_urem_odd_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_urem_odd_poweroftwo:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,1,3435973837]
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -536,12 +536,12 @@ define <4 x i32> @test_urem_even_poweroftwo(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_urem_even_poweroftwo:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,3067833783,3067833783,3067833783]
; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; CHECK-SSE2-NEXT: pmuludq %xmm2, %xmm1
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,1,3067833783]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,268435456,2147483648]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,3,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -555,10 +555,10 @@ define <4 x i32> @test_urem_even_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_even_poweroftwo:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,1,3067833783]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,2147483648,2147483648]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,268435456,2147483648]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -572,10 +572,10 @@ define <4 x i32> @test_urem_even_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_urem_even_poweroftwo:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,1,3067833783]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,2147483648,2147483648,2147483648]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2147483648,2147483648,268435456,2147483648]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -588,7 +588,7 @@ define <4 x i32> @test_urem_even_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_urem_even_poweroftwo:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,1,3067833783]
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -599,7 +599,7 @@ define <4 x i32> @test_urem_even_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_urem_even_poweroftwo:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,1,3067833783]
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -616,11 +616,11 @@ define <4 x i32> @test_urem_odd_even_poweroftwo(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_urem_odd_even_poweroftwo:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,1,3264175145]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,2147483648,268435456,1073741824]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,u,3264175145,u]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,u,1073741824,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -634,10 +634,10 @@ define <4 x i32> @test_urem_odd_even_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_odd_even_poweroftwo:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,1,3264175145]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,u,1073741824,u]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,2147483648,268435456,1073741824]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -651,10 +651,10 @@ define <4 x i32> @test_urem_odd_even_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_urem_odd_even_poweroftwo:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,1,3264175145]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,u,1073741824,u]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,2147483648,268435456,1073741824]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -667,7 +667,7 @@ define <4 x i32> @test_urem_odd_even_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_urem_odd_even_poweroftwo:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,1,3264175145]
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -678,7 +678,7 @@ define <4 x i32> @test_urem_odd_even_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_urem_odd_even_poweroftwo:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,1,3264175145]
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -710,7 +710,7 @@ define <4 x i32> @test_urem_odd_one(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_odd_one:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,3435973837,3435973837]
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993459,858993459,4294967295,858993459]
; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
@@ -719,7 +719,7 @@ define <4 x i32> @test_urem_odd_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_urem_odd_one:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,3435973837,3435973837]
; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpsrld $31, %xmm0, %xmm0
@@ -769,7 +769,7 @@ define <4 x i32> @test_urem_even_one(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_even_one:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,3067833783,3067833783]
; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm1
; CHECK-SSE41-NEXT: psrld $1, %xmm1
; CHECK-SSE41-NEXT: pslld $31, %xmm0
@@ -782,7 +782,7 @@ define <4 x i32> @test_urem_even_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_urem_even_one:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,3067833783,3067833783]
; CHECK-AVX1-NEXT: vpsrld $1, %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpslld $31, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -822,10 +822,10 @@ define <4 x i32> @test_urem_odd_even_one(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_urem_odd_even_one:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,0,3264175145]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,u,3264175145,u]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,1073741824,1073741824]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -838,9 +838,9 @@ define <4 x i32> @test_urem_odd_even_one(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_odd_even_one:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,0,3264175145]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,1073741824,1073741824]
; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -854,9 +854,9 @@ define <4 x i32> @test_urem_odd_even_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_urem_odd_even_one:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,0,3264175145]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,2147483648,1073741824,1073741824]
; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -869,7 +869,7 @@ define <4 x i32> @test_urem_odd_even_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_urem_odd_even_one:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,0,3264175145]
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -880,7 +880,7 @@ define <4 x i32> @test_urem_odd_even_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_urem_odd_even_one:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,0,3264175145]
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -899,10 +899,10 @@ define <4 x i32> @test_urem_odd_INT_MIN(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_urem_odd_INT_MIN:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3435973837,3435973837,3435973837,3435973837]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,1,3435973837]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,u,2,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; CHECK-SSE2-NEXT: psrlq $32, %xmm0
@@ -914,7 +914,7 @@ define <4 x i32> @test_urem_odd_INT_MIN(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_odd_INT_MIN:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,1,3435973837]
; CHECK-SSE41-NEXT: pmovsxbq {{.*#+}} xmm1 = [1,2]
; CHECK-SSE41-NEXT: pmuludq %xmm0, %xmm1
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
@@ -928,8 +928,8 @@ define <4 x i32> @test_urem_odd_INT_MIN(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_urem_odd_INT_MIN:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,1,3435973837]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [1,u,2,u]
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
; CHECK-AVX1-NEXT: vpsrlq $32, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -940,7 +940,7 @@ define <4 x i32> @test_urem_odd_INT_MIN(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_urem_odd_INT_MIN:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,1,3435973837]
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -951,7 +951,7 @@ define <4 x i32> @test_urem_odd_INT_MIN(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_urem_odd_INT_MIN:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,1,3435973837]
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -968,12 +968,12 @@ define <4 x i32> @test_urem_even_INT_MIN(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_urem_even_INT_MIN:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,3067833783,3067833783,3067833783]
; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; CHECK-SSE2-NEXT: pmuludq %xmm2, %xmm1
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,1,3067833783]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,2,2147483648]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,3,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -987,10 +987,10 @@ define <4 x i32> @test_urem_even_INT_MIN(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_even_INT_MIN:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,1,3067833783]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,2147483648,2147483648]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,2,2147483648]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1004,10 +1004,10 @@ define <4 x i32> @test_urem_even_INT_MIN(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_urem_even_INT_MIN:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,1,3067833783]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,2147483648,2147483648,2147483648]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2147483648,2147483648,2,2147483648]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1020,7 +1020,7 @@ define <4 x i32> @test_urem_even_INT_MIN(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_urem_even_INT_MIN:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,1,3067833783]
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1031,7 +1031,7 @@ define <4 x i32> @test_urem_even_INT_MIN(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_urem_even_INT_MIN:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,1,3067833783]
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -1048,11 +1048,11 @@ define <4 x i32> @test_urem_odd_even_INT_MIN(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_urem_odd_even_INT_MIN:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,1,3264175145]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,2147483648,2,1073741824]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,u,3264175145,u]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,u,1073741824,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1066,10 +1066,10 @@ define <4 x i32> @test_urem_odd_even_INT_MIN(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_odd_even_INT_MIN:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,1,3264175145]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,u,1073741824,u]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,2147483648,2,1073741824]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1083,10 +1083,10 @@ define <4 x i32> @test_urem_odd_even_INT_MIN(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_urem_odd_even_INT_MIN:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,1,3264175145]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,u,1073741824,u]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,2147483648,2,1073741824]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1099,7 +1099,7 @@ define <4 x i32> @test_urem_odd_even_INT_MIN(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_urem_odd_even_INT_MIN:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,1,3264175145]
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1110,7 +1110,7 @@ define <4 x i32> @test_urem_odd_even_INT_MIN(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_urem_odd_even_INT_MIN:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,1,3264175145]
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -1129,10 +1129,10 @@ define <4 x i32> @test_urem_odd_allones_and_poweroftwo(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_urem_odd_allones_and_poweroftwo:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [4294967295,u,3435973837,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,4294967295,1,3435973837]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,u,268435456,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; CHECK-SSE2-NEXT: psrlq $32, %xmm0
@@ -1144,7 +1144,7 @@ define <4 x i32> @test_urem_odd_allones_and_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_odd_allones_and_poweroftwo:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,4294967295,1,3435973837]
; CHECK-SSE41-NEXT: pmovsxdq {{.*#+}} xmm1 = [1,268435456]
; CHECK-SSE41-NEXT: pmuludq %xmm0, %xmm1
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
@@ -1158,8 +1158,8 @@ define <4 x i32> @test_urem_odd_allones_and_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_urem_odd_allones_and_poweroftwo:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,4294967295,1,3435973837]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [1,u,268435456,u]
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
; CHECK-AVX1-NEXT: vpsrlq $32, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1170,7 +1170,7 @@ define <4 x i32> @test_urem_odd_allones_and_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_urem_odd_allones_and_poweroftwo:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,4294967295,1,3435973837]
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1181,7 +1181,7 @@ define <4 x i32> @test_urem_odd_allones_and_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_urem_odd_allones_and_poweroftwo:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,4294967295,1,3435973837]
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -1198,11 +1198,11 @@ define <4 x i32> @test_urem_even_allones_and_poweroftwo(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_urem_even_allones_and_poweroftwo:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,4294967295,1,3067833783]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,1,268435456,2147483648]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [4294967295,u,3067833783,u]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,2147483648,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1216,10 +1216,10 @@ define <4 x i32> @test_urem_even_allones_and_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_even_allones_and_poweroftwo:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,4294967295,1,3067833783]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,2147483648,u]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,1,268435456,2147483648]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1233,10 +1233,10 @@ define <4 x i32> @test_urem_even_allones_and_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_urem_even_allones_and_poweroftwo:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,4294967295,1,3067833783]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [1,u,2147483648,u]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2147483648,1,268435456,2147483648]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1249,7 +1249,7 @@ define <4 x i32> @test_urem_even_allones_and_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_urem_even_allones_and_poweroftwo:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,4294967295,1,3067833783]
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1260,7 +1260,7 @@ define <4 x i32> @test_urem_even_allones_and_poweroftwo(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_urem_even_allones_and_poweroftwo:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,4294967295,1,3067833783]
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -1277,11 +1277,11 @@ define <4 x i32> @test_urem_odd_even_allones_and_poweroftwo(<4 x i32> %X) nounwi
; CHECK-SSE2-LABEL: test_urem_odd_even_allones_and_poweroftwo:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,4294967295,1,3264175145]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,1,268435456,1073741824]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [4294967295,u,3264175145,u]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,1073741824,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1295,10 +1295,10 @@ define <4 x i32> @test_urem_odd_even_allones_and_poweroftwo(<4 x i32> %X) nounwi
;
; CHECK-SSE41-LABEL: test_urem_odd_even_allones_and_poweroftwo:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,4294967295,1,3264175145]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,1073741824,u]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,1,268435456,1073741824]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1312,10 +1312,10 @@ define <4 x i32> @test_urem_odd_even_allones_and_poweroftwo(<4 x i32> %X) nounwi
;
; CHECK-AVX1-LABEL: test_urem_odd_even_allones_and_poweroftwo:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,4294967295,1,3264175145]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [1,u,1073741824,u]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,1,268435456,1073741824]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1328,7 +1328,7 @@ define <4 x i32> @test_urem_odd_even_allones_and_poweroftwo(<4 x i32> %X) nounwi
;
; CHECK-AVX2-LABEL: test_urem_odd_even_allones_and_poweroftwo:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,4294967295,1,3264175145]
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1339,7 +1339,7 @@ define <4 x i32> @test_urem_odd_even_allones_and_poweroftwo(<4 x i32> %X) nounwi
;
; CHECK-AVX512VL-LABEL: test_urem_odd_even_allones_and_poweroftwo:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,4294967295,1,3264175145]
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -1358,9 +1358,9 @@ define <4 x i32> @test_urem_odd_allones_and_one(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_urem_odd_allones_and_one:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,4294967295,0,3435973837]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [4294967295,u,3435973837,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE2-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -1370,7 +1370,7 @@ define <4 x i32> @test_urem_odd_allones_and_one(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_odd_allones_and_one:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,4294967295,0,3435973837]
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993459,1,4294967295,858993459]
; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
@@ -1379,7 +1379,7 @@ define <4 x i32> @test_urem_odd_allones_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX-LABEL: test_urem_odd_allones_and_one:
; CHECK-AVX: # %bb.0:
-; CHECK-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,4294967295,0,3435973837]
; CHECK-AVX-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX-NEXT: vpsrld $31, %xmm0, %xmm0
@@ -1395,11 +1395,11 @@ define <4 x i32> @test_urem_even_allones_and_one(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_urem_even_allones_and_one:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,4294967295,0,3067833783]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,1,1,2147483648]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [4294967295,u,3067833783,u]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,2147483648,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1413,10 +1413,10 @@ define <4 x i32> @test_urem_even_allones_and_one(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_even_allones_and_one:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,4294967295,0,3067833783]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,2147483648,u]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,1,1,2147483648]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1430,10 +1430,10 @@ define <4 x i32> @test_urem_even_allones_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_urem_even_allones_and_one:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,4294967295,0,3067833783]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [1,u,2147483648,u]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2147483648,1,1,2147483648]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1446,7 +1446,7 @@ define <4 x i32> @test_urem_even_allones_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_urem_even_allones_and_one:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,4294967295,0,3067833783]
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1457,7 +1457,7 @@ define <4 x i32> @test_urem_even_allones_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_urem_even_allones_and_one:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,4294967295,0,3067833783]
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -1474,10 +1474,10 @@ define <4 x i32> @test_urem_odd_even_allones_and_one(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_urem_odd_even_allones_and_one:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,4294967295,0,3264175145]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [4294967295,u,3264175145,u]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,1,1073741824,1073741824]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -1490,9 +1490,9 @@ define <4 x i32> @test_urem_odd_even_allones_and_one(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_odd_even_allones_and_one:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,4294967295,0,3264175145]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,1,1073741824,1073741824]
; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1506,9 +1506,9 @@ define <4 x i32> @test_urem_odd_even_allones_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_urem_odd_even_allones_and_one:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,4294967295,0,3264175145]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [1,1,1073741824,1073741824]
; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1521,7 +1521,7 @@ define <4 x i32> @test_urem_odd_even_allones_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_urem_odd_even_allones_and_one:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,4294967295,0,3264175145]
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1532,7 +1532,7 @@ define <4 x i32> @test_urem_odd_even_allones_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_urem_odd_even_allones_and_one:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,4294967295,0,3264175145]
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -1551,10 +1551,10 @@ define <4 x i32> @test_urem_odd_poweroftwo_and_one(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_urem_odd_poweroftwo_and_one:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,1,0,3435973837]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,3435973837,u]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [268435456,268435456,1,1]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -1567,9 +1567,9 @@ define <4 x i32> @test_urem_odd_poweroftwo_and_one(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_odd_poweroftwo_and_one:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,1,0,3435973837]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [268435456,268435456,1,1]
; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1583,9 +1583,9 @@ define <4 x i32> @test_urem_odd_poweroftwo_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_urem_odd_poweroftwo_and_one:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,1,0,3435973837]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [268435456,268435456,1,1]
; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1598,7 +1598,7 @@ define <4 x i32> @test_urem_odd_poweroftwo_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_urem_odd_poweroftwo_and_one:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,1,0,3435973837]
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1609,7 +1609,7 @@ define <4 x i32> @test_urem_odd_poweroftwo_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_urem_odd_poweroftwo_and_one:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,1,0,3435973837]
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -1626,11 +1626,11 @@ define <4 x i32> @test_urem_even_poweroftwo_and_one(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_urem_even_poweroftwo_and_one:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,1,0,3067833783]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,268435456,1,2147483648]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,3067833783,u]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [268435456,u,2147483648,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1644,10 +1644,10 @@ define <4 x i32> @test_urem_even_poweroftwo_and_one(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_even_poweroftwo_and_one:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,1,0,3067833783]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [268435456,u,2147483648,u]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,268435456,1,2147483648]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1661,10 +1661,10 @@ define <4 x i32> @test_urem_even_poweroftwo_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_urem_even_poweroftwo_and_one:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,1,0,3067833783]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [268435456,u,2147483648,u]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2147483648,268435456,1,2147483648]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1677,7 +1677,7 @@ define <4 x i32> @test_urem_even_poweroftwo_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_urem_even_poweroftwo_and_one:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,1,0,3067833783]
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1688,7 +1688,7 @@ define <4 x i32> @test_urem_even_poweroftwo_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_urem_even_poweroftwo_and_one:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,1,0,3067833783]
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -1705,10 +1705,10 @@ define <4 x i32> @test_urem_odd_even_poweroftwo_and_one(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_urem_odd_even_poweroftwo_and_one:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,1,0,3264175145]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,3264175145,u]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [268435456,268435456,1073741824,1073741824]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -1721,9 +1721,9 @@ define <4 x i32> @test_urem_odd_even_poweroftwo_and_one(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_odd_even_poweroftwo_and_one:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,1,0,3264175145]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [268435456,268435456,1073741824,1073741824]
; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1737,9 +1737,9 @@ define <4 x i32> @test_urem_odd_even_poweroftwo_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_urem_odd_even_poweroftwo_and_one:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,1,0,3264175145]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [268435456,268435456,1073741824,1073741824]
; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1752,7 +1752,7 @@ define <4 x i32> @test_urem_odd_even_poweroftwo_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_urem_odd_even_poweroftwo_and_one:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,1,0,3264175145]
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1763,7 +1763,7 @@ define <4 x i32> @test_urem_odd_even_poweroftwo_and_one(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_urem_odd_even_poweroftwo_and_one:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,1,0,3264175145]
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -1781,10 +1781,10 @@ define <4 x i32> @test_urem_odd_allones_and_poweroftwo_and_one(<4 x i32> %X) nou
; CHECK-SSE2-LABEL: test_urem_odd_allones_and_poweroftwo_and_one:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [4294967295,0,0,0]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,4294967295,1,0]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,u,268435456,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; CHECK-SSE2-NEXT: psrlq $32, %xmm0
@@ -1796,7 +1796,7 @@ define <4 x i32> @test_urem_odd_allones_and_poweroftwo_and_one(<4 x i32> %X) nou
;
; CHECK-SSE41-LABEL: test_urem_odd_allones_and_poweroftwo_and_one:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,4294967295,1,0]
; CHECK-SSE41-NEXT: pmovsxdq {{.*#+}} xmm1 = [1,268435456]
; CHECK-SSE41-NEXT: pmuludq %xmm0, %xmm1
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
@@ -1810,8 +1810,8 @@ define <4 x i32> @test_urem_odd_allones_and_poweroftwo_and_one(<4 x i32> %X) nou
;
; CHECK-AVX1-LABEL: test_urem_odd_allones_and_poweroftwo_and_one:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,4294967295,1,0]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [1,u,268435456,u]
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
; CHECK-AVX1-NEXT: vpsrlq $32, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1822,7 +1822,7 @@ define <4 x i32> @test_urem_odd_allones_and_poweroftwo_and_one(<4 x i32> %X) nou
;
; CHECK-AVX2-LABEL: test_urem_odd_allones_and_poweroftwo_and_one:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,4294967295,1,0]
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1833,7 +1833,7 @@ define <4 x i32> @test_urem_odd_allones_and_poweroftwo_and_one(<4 x i32> %X) nou
;
; CHECK-AVX512VL-LABEL: test_urem_odd_allones_and_poweroftwo_and_one:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,4294967295,1,0]
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -1849,10 +1849,10 @@ define <4 x i32> @test_urem_even_allones_and_poweroftwo_and_one(<4 x i32> %X) no
; CHECK-SSE2-LABEL: test_urem_even_allones_and_poweroftwo_and_one:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [4294967295,0,0,0]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,4294967295,1,0]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,u,268435456,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; CHECK-SSE2-NEXT: psrlq $32, %xmm0
@@ -1864,7 +1864,7 @@ define <4 x i32> @test_urem_even_allones_and_poweroftwo_and_one(<4 x i32> %X) no
;
; CHECK-SSE41-LABEL: test_urem_even_allones_and_poweroftwo_and_one:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,4294967295,1,0]
; CHECK-SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = [2147483648,268435456]
; CHECK-SSE41-NEXT: pmuludq %xmm0, %xmm1
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
@@ -1878,8 +1878,8 @@ define <4 x i32> @test_urem_even_allones_and_poweroftwo_and_one(<4 x i32> %X) no
;
; CHECK-AVX1-LABEL: test_urem_even_allones_and_poweroftwo_and_one:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,4294967295,1,0]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [2147483648,u,268435456,u]
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
; CHECK-AVX1-NEXT: vpsrlq $32, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1890,7 +1890,7 @@ define <4 x i32> @test_urem_even_allones_and_poweroftwo_and_one(<4 x i32> %X) no
;
; CHECK-AVX2-LABEL: test_urem_even_allones_and_poweroftwo_and_one:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,4294967295,1,0]
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1901,7 +1901,7 @@ define <4 x i32> @test_urem_even_allones_and_poweroftwo_and_one(<4 x i32> %X) no
;
; CHECK-AVX512VL-LABEL: test_urem_even_allones_and_poweroftwo_and_one:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,4294967295,1,0]
; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/urem-seteq-vec-nonzero.ll b/llvm/test/CodeGen/X86/urem-seteq-vec-nonzero.ll
index 6a36cd2..8042103 100644
--- a/llvm/test/CodeGen/X86/urem-seteq-vec-nonzero.ll
+++ b/llvm/test/CodeGen/X86/urem-seteq-vec-nonzero.ll
@@ -25,7 +25,7 @@ define <4 x i1> @t32_3(<4 x i32> %X) nounwind {
; CHECK-SSE41-LABEL: t32_3:
; CHECK-SSE41: # %bb.0:
; CHECK-SSE41-NEXT: psubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311531,2863311531,2863311531,2863311531]
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1431655765,1431655764,1431655764,1431655764]
; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
@@ -34,7 +34,7 @@ define <4 x i1> @t32_3(<4 x i32> %X) nounwind {
; CHECK-AVX1-LABEL: t32_3:
; CHECK-AVX1: # %bb.0:
; CHECK-AVX1-NEXT: vpsubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2863311531,2863311531,2863311531,2863311531]
; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: retq
@@ -80,7 +80,7 @@ define <4 x i1> @t32_5(<4 x i32> %X) nounwind {
; CHECK-SSE41-LABEL: t32_5:
; CHECK-SSE41: # %bb.0:
; CHECK-SSE41-NEXT: psubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,3435973837,3435973837]
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993458,858993458,858993458,858993458]
; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
@@ -89,7 +89,7 @@ define <4 x i1> @t32_5(<4 x i32> %X) nounwind {
; CHECK-AVX1-LABEL: t32_5:
; CHECK-AVX1: # %bb.0:
; CHECK-AVX1-NEXT: vpsubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,3435973837,3435973837]
; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: retq
@@ -140,7 +140,7 @@ define <4 x i1> @t32_6_part0(<4 x i32> %X) nounwind {
; CHECK-SSE41-LABEL: t32_6_part0:
; CHECK-SSE41: # %bb.0:
; CHECK-SSE41-NEXT: psubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311531,2863311531,2863311531,2863311531]
; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm1
; CHECK-SSE41-NEXT: psrld $1, %xmm1
; CHECK-SSE41-NEXT: pslld $31, %xmm0
@@ -153,7 +153,7 @@ define <4 x i1> @t32_6_part0(<4 x i32> %X) nounwind {
; CHECK-AVX1-LABEL: t32_6_part0:
; CHECK-AVX1: # %bb.0:
; CHECK-AVX1-NEXT: vpsubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2863311531,2863311531,2863311531,2863311531]
; CHECK-AVX1-NEXT: vpsrld $1, %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpslld $31, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -211,7 +211,7 @@ define <4 x i1> @t32_6_part1(<4 x i32> %X) nounwind {
; CHECK-SSE41-LABEL: t32_6_part1:
; CHECK-SSE41: # %bb.0:
; CHECK-SSE41-NEXT: psubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311531,2863311531,2863311531,2863311531]
; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm1
; CHECK-SSE41-NEXT: psrld $1, %xmm1
; CHECK-SSE41-NEXT: pslld $31, %xmm0
@@ -224,7 +224,7 @@ define <4 x i1> @t32_6_part1(<4 x i32> %X) nounwind {
; CHECK-AVX1-LABEL: t32_6_part1:
; CHECK-AVX1: # %bb.0:
; CHECK-AVX1-NEXT: vpsubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2863311531,2863311531,2863311531,2863311531]
; CHECK-AVX1-NEXT: vpsrld $1, %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpslld $31, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -275,7 +275,7 @@ define <4 x i1> @t32_tautological(<4 x i32> %X) nounwind {
; CHECK-SSE41-LABEL: t32_tautological:
; CHECK-SSE41: # %bb.0:
; CHECK-SSE41-NEXT: psubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311531,2863311531,2863311531,2863311531]
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [4294967295,4294967295,4294967295,1431655764]
; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
@@ -286,7 +286,7 @@ define <4 x i1> @t32_tautological(<4 x i32> %X) nounwind {
; CHECK-AVX1-LABEL: t32_tautological:
; CHECK-AVX1: # %bb.0:
; CHECK-AVX1-NEXT: vpsubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2863311531,2863311531,2863311531,2863311531]
; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
diff --git a/llvm/test/CodeGen/X86/urem-seteq-vec-splat.ll b/llvm/test/CodeGen/X86/urem-seteq-vec-splat.ll
index 2166e43..b490c3c 100644
--- a/llvm/test/CodeGen/X86/urem-seteq-vec-splat.ll
+++ b/llvm/test/CodeGen/X86/urem-seteq-vec-splat.ll
@@ -23,7 +23,7 @@ define <4 x i32> @test_urem_odd_25(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_odd_25:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3264175145,3264175145,3264175145,3264175145]
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [171798691,171798691,171798691,171798691]
; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
@@ -32,7 +32,7 @@ define <4 x i32> @test_urem_odd_25(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_urem_odd_25:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3264175145,3264175145,3264175145,3264175145]
; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpsrld $31, %xmm0, %xmm0
@@ -83,7 +83,7 @@ define <4 x i32> @test_urem_even_100(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_even_100:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3264175145,3264175145,3264175145,3264175145]
; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm1
; CHECK-SSE41-NEXT: psrld $2, %xmm1
; CHECK-SSE41-NEXT: pslld $30, %xmm0
@@ -96,7 +96,7 @@ define <4 x i32> @test_urem_even_100(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_urem_even_100:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3264175145,3264175145,3264175145,3264175145]
; CHECK-AVX1-NEXT: vpsrld $2, %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpslld $30, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -139,9 +139,9 @@ define <4 x i32> @test_urem_odd_neg25(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_urem_odd_neg25:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3264175145,1030792151,1030792151,3264175145]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1030792151,u,3264175145,u]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE2-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -151,7 +151,7 @@ define <4 x i32> @test_urem_odd_neg25(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_odd_neg25:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3264175145,1030792151,1030792151,3264175145]
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [171798691,1,1,171798691]
; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
@@ -160,7 +160,7 @@ define <4 x i32> @test_urem_odd_neg25(<4 x i32> %X) nounwind {
;
; CHECK-AVX-LABEL: test_urem_odd_neg25:
; CHECK-AVX: # %bb.0:
-; CHECK-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3264175145,1030792151,1030792151,3264175145]
; CHECK-AVX-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX-NEXT: vpsrld $31, %xmm0, %xmm0
@@ -176,9 +176,9 @@ define <4 x i32> @test_urem_even_neg100(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_urem_even_neg100:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4252017623,3264175145,4252017623,3264175145]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3264175145,3264175145,3264175145,3264175145]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm1
@@ -192,7 +192,7 @@ define <4 x i32> @test_urem_even_neg100(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: test_urem_even_neg100:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4252017623,3264175145,4252017623,3264175145]
; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm1
; CHECK-SSE41-NEXT: psrld $2, %xmm1
; CHECK-SSE41-NEXT: pslld $30, %xmm0
@@ -205,7 +205,7 @@ define <4 x i32> @test_urem_even_neg100(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: test_urem_even_neg100:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [4252017623,3264175145,4252017623,3264175145]
; CHECK-AVX1-NEXT: vpsrld $2, %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpslld $30, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -216,7 +216,7 @@ define <4 x i32> @test_urem_even_neg100(<4 x i32> %X) nounwind {
;
; CHECK-AVX2-LABEL: test_urem_even_neg100:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [4252017623,3264175145,4252017623,3264175145]
; CHECK-AVX2-NEXT: vpsrld $2, %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpslld $30, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -227,7 +227,7 @@ define <4 x i32> @test_urem_even_neg100(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_urem_even_neg100:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [4252017623,3264175145,4252017623,3264175145]
; CHECK-AVX512VL-NEXT: vprord $2, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -277,7 +277,7 @@ define <4 x i32> @test_urem_odd_undef1(<4 x i32> %X) nounwind {
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: psrld $3, %xmm2
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [25,25,25,25]
; CHECK-SSE41-NEXT: psubd %xmm2, %xmm0
; CHECK-SSE41-NEXT: pxor %xmm1, %xmm1
; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
@@ -293,7 +293,7 @@ define <4 x i32> @test_urem_odd_undef1(<4 x i32> %X) nounwind {
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpsrld $3, %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [25,25,25,25]
; CHECK-AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -372,7 +372,7 @@ define <4 x i32> @test_urem_even_undef1(<4 x i32> %X) nounwind {
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: psrld $5, %xmm2
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [100,100,100,100]
; CHECK-SSE41-NEXT: psubd %xmm2, %xmm0
; CHECK-SSE41-NEXT: pxor %xmm1, %xmm1
; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
@@ -388,7 +388,7 @@ define <4 x i32> @test_urem_even_undef1(<4 x i32> %X) nounwind {
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpsrld $5, %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [100,100,100,100]
; CHECK-AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/urem-seteq-vec-tautological.ll b/llvm/test/CodeGen/X86/urem-seteq-vec-tautological.ll
index 84856aa..e5b19a5 100644
--- a/llvm/test/CodeGen/X86/urem-seteq-vec-tautological.ll
+++ b/llvm/test/CodeGen/X86/urem-seteq-vec-tautological.ll
@@ -25,7 +25,7 @@ define <4 x i1> @t0_all_tautological(<4 x i32> %X) nounwind {
define <4 x i1> @t1_all_odd_eq(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: t1_all_odd_eq:
; CHECK-SSE2: # %bb.0:
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311531,2863311531,2863311531,2863311531]
; CHECK-SSE2-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -33,7 +33,7 @@ define <4 x i1> @t1_all_odd_eq(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: t1_all_odd_eq:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311531,2863311531,2863311531,2863311531]
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1431655765,4294967295,4294967295,4294967295]
; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
@@ -43,7 +43,7 @@ define <4 x i1> @t1_all_odd_eq(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: t1_all_odd_eq:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2863311531,2863311531,2863311531,2863311531]
; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -76,7 +76,7 @@ define <4 x i1> @t1_all_odd_eq(<4 x i32> %X) nounwind {
define <4 x i1> @t1_all_odd_ne(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: t1_all_odd_ne:
; CHECK-SSE2: # %bb.0:
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311531,2863311531,2863311531,2863311531]
; CHECK-SSE2-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -84,7 +84,7 @@ define <4 x i1> @t1_all_odd_ne(<4 x i32> %X) nounwind {
;
; CHECK-SSE41-LABEL: t1_all_odd_ne:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311531,2863311531,2863311531,2863311531]
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1431655765,4294967295,4294967295,4294967295]
; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
@@ -95,7 +95,7 @@ define <4 x i1> @t1_all_odd_ne(<4 x i32> %X) nounwind {
;
; CHECK-AVX1-LABEL: t1_all_odd_ne:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2863311531,2863311531,2863311531,2863311531]
; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
@@ -187,7 +187,7 @@ define <2 x i1> @t3_wide(<2 x i64> %X) nounwind {
; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm3
; CHECK-SSE2-NEXT: psrlq $32, %xmm3
; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm3
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311530,2863311530]
; CHECK-SSE2-NEXT: paddq %xmm3, %xmm0
; CHECK-SSE2-NEXT: psllq $32, %xmm0
; CHECK-SSE2-NEXT: paddq %xmm2, %xmm0
@@ -212,7 +212,7 @@ define <2 x i1> @t3_wide(<2 x i64> %X) nounwind {
; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm3
; CHECK-SSE41-NEXT: psrlq $32, %xmm3
; CHECK-SSE41-NEXT: pmuludq %xmm1, %xmm3
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311530,2863311530]
; CHECK-SSE41-NEXT: paddq %xmm3, %xmm0
; CHECK-SSE41-NEXT: psllq $32, %xmm0
; CHECK-SSE41-NEXT: paddq %xmm2, %xmm0
@@ -236,7 +236,7 @@ define <2 x i1> @t3_wide(<2 x i64> %X) nounwind {
; CHECK-AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; CHECK-AVX1-NEXT: vpsrlq $32, %xmm0, %xmm3
; CHECK-AVX1-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2863311530,2863311530]
; CHECK-AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpsllq $32, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpaddq %xmm0, %xmm2, %xmm0
@@ -255,7 +255,7 @@ define <2 x i1> @t3_wide(<2 x i64> %X) nounwind {
; CHECK-AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; CHECK-AVX2-NEXT: vpsrlq $32, %xmm0, %xmm3
; CHECK-AVX2-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
-; CHECK-AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2863311530,2863311530]
; CHECK-AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpsllq $32, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0
diff --git a/llvm/test/CodeGen/X86/var-permute-128.ll b/llvm/test/CodeGen/X86/var-permute-128.ll
index 83a0ddb..fce8795 100644
--- a/llvm/test/CodeGen/X86/var-permute-128.ll
+++ b/llvm/test/CodeGen/X86/var-permute-128.ll
@@ -241,7 +241,7 @@ define <4 x i32> @var_shuffle_v4i32(<4 x i32> %v, <4 x i32> %indices) nounwind {
;
; SSE41-LABEL: var_shuffle_v4i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [67372036,67372036,67372036,67372036]
; SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE41-NEXT: pshufb %xmm1, %xmm0
; SSE41-NEXT: retq
@@ -319,7 +319,7 @@ define <4 x i32> @var_shuffle_zero_v4i32(<4 x i32> %v, <4 x i32> %indices) nounw
; SSE41-NEXT: pmaxud %xmm1, %xmm2
; SSE41-NEXT: pcmpeqd %xmm1, %xmm2
; SSE41-NEXT: por %xmm2, %xmm1
-; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [67372036,67372036,67372036,67372036]
; SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE41-NEXT: por %xmm2, %xmm1
; SSE41-NEXT: pshufb %xmm1, %xmm0
@@ -1261,7 +1261,7 @@ define <4 x float> @var_shuffle_v4f32(<4 x float> %v, <4 x i32> %indices) nounwi
;
; SSE41-LABEL: var_shuffle_v4f32:
; SSE41: # %bb.0:
-; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [67372036,67372036,67372036,67372036]
; SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE41-NEXT: pshufb %xmm1, %xmm0
; SSE41-NEXT: retq
@@ -1339,7 +1339,7 @@ define <4 x float> @var_shuffle_zero_v4f32(<4 x float> %v, <4 x i32> %indices) n
; SSE41-NEXT: pmaxud %xmm1, %xmm2
; SSE41-NEXT: pcmpeqd %xmm1, %xmm2
; SSE41-NEXT: por %xmm2, %xmm1
-; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [67372036,67372036,67372036,67372036]
; SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE41-NEXT: por %xmm2, %xmm1
; SSE41-NEXT: pshufb %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/vec_reassociate.ll b/llvm/test/CodeGen/X86/vec_reassociate.ll
index a9473fff..4703ca3 100644
--- a/llvm/test/CodeGen/X86/vec_reassociate.ll
+++ b/llvm/test/CodeGen/X86/vec_reassociate.ll
@@ -38,13 +38,13 @@ define <4 x i32> @mul_4i32(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: mul_4i32:
; X86: # %bb.0:
; X86-NEXT: pmulld %xmm1, %xmm0
-; X86-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [4,6,6,4]
; X86-NEXT: retl
;
; X64-LABEL: mul_4i32:
; X64: # %bb.0:
; X64-NEXT: pmulld %xmm1, %xmm0
-; X64-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4,6,6,4]
; X64-NEXT: retq
%1 = mul <4 x i32> %a0, <i32 1, i32 2, i32 3, i32 4>
%2 = mul <4 x i32> %a1, <i32 4, i32 3, i32 2, i32 1>
@@ -56,13 +56,13 @@ define <4 x i32> @mul_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: mul_4i32_commute:
; X86: # %bb.0:
; X86-NEXT: pmulld %xmm1, %xmm0
-; X86-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [4,6,6,4]
; X86-NEXT: retl
;
; X64-LABEL: mul_4i32_commute:
; X64: # %bb.0:
; X64-NEXT: pmulld %xmm1, %xmm0
-; X64-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4,6,6,4]
; X64-NEXT: retq
%1 = mul <4 x i32> <i32 1, i32 2, i32 3, i32 4>, %a0
%2 = mul <4 x i32> <i32 4, i32 3, i32 2, i32 1>, %a1
diff --git a/llvm/test/CodeGen/X86/vector-compress.ll b/llvm/test/CodeGen/X86/vector-compress.ll
index ac932d5..1a63515 100644
--- a/llvm/test/CodeGen/X86/vector-compress.ll
+++ b/llvm/test/CodeGen/X86/vector-compress.ll
@@ -1090,7 +1090,6 @@ define <16 x i8> @test_compress_v16i8(<16 x i8> %vec, <16 x i1> %mask, <16 x i8>
; AVX2-NEXT: pushq %r12
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: vpsllw $7, %xmm1, %xmm1
-; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpgtb %xmm1, %xmm3, %xmm1
; AVX2-NEXT: vmovaps %xmm2, -{{[0-9]+}}(%rsp)
@@ -1335,7 +1334,6 @@ define <32 x i8> @test_compress_v32i8(<32 x i8> %vec, <32 x i1> %mask, <32 x i8>
; AVX2-NEXT: andq $-32, %rsp
; AVX2-NEXT: subq $64, %rsp
; AVX2-NEXT: vpsllw $7, %ymm1, %ymm1
-; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpgtb %ymm1, %ymm3, %ymm3
; AVX2-NEXT: vmovaps %ymm2, (%rsp)
@@ -4733,7 +4731,6 @@ define <4 x i8> @test_compress_small(<4 x i8> %vec, <4 x i1> %mask) nounwind {
; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpsllw $7, %xmm1, %xmm1
-; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpextrb $0, %xmm0, -{{[0-9]+}}(%rsp)
@@ -4751,72 +4748,7 @@ define <4 x i8> @test_compress_small(<4 x i8> %vec, <4 x i1> %mask) nounwind {
; AVX2-NEXT: vpextrb $3, %xmm1, %ecx
; AVX2-NEXT: andl $1, %ecx
; AVX2-NEXT: addq %rax, %rcx
-; AVX2-NEXT: vpextrb $4, %xmm0, -24(%rsp,%rcx)
-; AVX2-NEXT: vpextrb $4, %xmm1, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: addq %rcx, %rax
-; AVX2-NEXT: vpextrb $5, %xmm1, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: addq %rax, %rcx
-; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: vpextrb $5, %xmm0, -24(%rsp,%rax)
-; AVX2-NEXT: vpextrb $6, %xmm1, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: addq %rcx, %rax
-; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx
-; AVX2-NEXT: andl $15, %ecx
-; AVX2-NEXT: vpextrb $6, %xmm0, -24(%rsp,%rcx)
-; AVX2-NEXT: vpextrb $7, %xmm1, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: addq %rax, %rcx
-; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: vpextrb $7, %xmm0, -24(%rsp,%rax)
-; AVX2-NEXT: vpextrb $8, %xmm1, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: addq %rcx, %rax
-; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx
-; AVX2-NEXT: andl $15, %ecx
-; AVX2-NEXT: vpextrb $8, %xmm0, -24(%rsp,%rcx)
-; AVX2-NEXT: vpextrb $9, %xmm1, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: addq %rax, %rcx
-; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: vpextrb $9, %xmm0, -24(%rsp,%rax)
-; AVX2-NEXT: vpextrb $10, %xmm1, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: addq %rcx, %rax
-; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx
-; AVX2-NEXT: andl $15, %ecx
-; AVX2-NEXT: vpextrb $10, %xmm0, -24(%rsp,%rcx)
-; AVX2-NEXT: vpextrb $11, %xmm1, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: addq %rax, %rcx
-; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: vpextrb $11, %xmm0, -24(%rsp,%rax)
-; AVX2-NEXT: vpextrb $12, %xmm1, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: addq %rcx, %rax
-; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx
-; AVX2-NEXT: andl $15, %ecx
-; AVX2-NEXT: vpextrb $12, %xmm0, -24(%rsp,%rcx)
-; AVX2-NEXT: vpextrb $13, %xmm1, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: addq %rax, %rcx
-; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: vpextrb $13, %xmm0, -24(%rsp,%rax)
-; AVX2-NEXT: vpextrb $14, %xmm1, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: addl %ecx, %eax
-; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx
-; AVX2-NEXT: andl $15, %ecx
-; AVX2-NEXT: vpextrb $14, %xmm0, -24(%rsp,%rcx)
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: vpextrb $15, %xmm0, -24(%rsp,%rax)
+; AVX2-NEXT: vpextrb $15, %xmm0, -24(%rsp,%rcx)
; AVX2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
; AVX2-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-fshl-128.ll b/llvm/test/CodeGen/X86/vector-fshl-128.ll
index 762900e..a0c2760 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-128.ll
@@ -1821,9 +1821,9 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,32,64,128]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [32,u,128,u]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: por %xmm1, %xmm0
@@ -1841,7 +1841,7 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
; SSE41-NEXT: psrld $28, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
-; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,32,64,128]
; SSE41-NEXT: por %xmm2, %xmm0
; SSE41-NEXT: retq
;
@@ -1854,7 +1854,7 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
; AVX1-NEXT: vpsrld $28, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [16,32,64,128]
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
@@ -1935,9 +1935,9 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; X86-SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [16,32,64,128]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2 # [32,u,128,u]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X86-SSE2-NEXT: por %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-fshl-256.ll b/llvm/test/CodeGen/X86/vector-fshl-256.ll
index 445e572..2fadf5f 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-256.ll
@@ -1647,7 +1647,7 @@ define <8 x i32> @constant_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y) nounwind {
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
+; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [256,512,1024,2048]
; AVX1-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsrld $25, %xmm1, %xmm3
; AVX1-NEXT: vpsrld $27, %xmm1, %xmm4
@@ -1656,7 +1656,7 @@ define <8 x i32> @constant_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y) nounwind {
; AVX1-NEXT: vpsrld $28, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
-; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [16,32,64,128]
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll
index d0690bd..ec2efcd 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll
@@ -1302,9 +1302,9 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x) nounwind {
; SSE2-LABEL: constant_funnnel_v4i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,32,64,128]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [32,u,128,u]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1316,8 +1316,8 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x) nounwind {
; SSE41-LABEL: constant_funnnel_v4i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [32,u,128,u]
+; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,32,64,128]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1328,8 +1328,8 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x) nounwind {
; AVX1-LABEL: constant_funnnel_v4i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [32,u,128,u]
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [16,32,64,128]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1394,9 +1394,9 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x) nounwind {
; X86-SSE2-LABEL: constant_funnnel_v4i32:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [16,32,64,128]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [32,u,128,u]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
index 421fa98..5f7e407 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
@@ -1082,13 +1082,13 @@ define <8 x i32> @constant_funnnel_v8i32(<8 x i32> %x) nounwind {
; AVX1-LABEL: constant_funnnel_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [32,u,128,u]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [512,u,2048,u]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [16,32,64,128]
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [256,512,1024,2048]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vmovshdup {{.*#+}} ymm2 = ymm0[1,1,3,3,5,5,7,7]
; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll
index b378dce..304daab 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll
@@ -319,9 +319,9 @@ define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x) nounwind {
; SSE2-LABEL: constant_funnnel_v2i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,32,1,1]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [32,u,1,u]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -333,8 +333,8 @@ define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x) nounwind {
; SSE41-LABEL: constant_funnnel_v2i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [32,u,1,u]
+; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,32,1,1]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -345,8 +345,8 @@ define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x) nounwind {
; AVX1-LABEL: constant_funnnel_v2i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [32,u,1,u]
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [16,32,1,1]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -411,9 +411,9 @@ define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x) nounwind {
; X86-SSE2-LABEL: constant_funnnel_v2i32:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [16,32,1,1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [32,u,1,u]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
diff --git a/llvm/test/CodeGen/X86/vector-fshl-sub128.ll b/llvm/test/CodeGen/X86/vector-fshl-sub128.ll
index 06ff7e7..ae5dd18 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-sub128.ll
@@ -500,9 +500,9 @@ define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y) nounwind {
; SSE2-NEXT: psrld $27, %xmm2
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,32,1,1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [32,u,1,u]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: por %xmm1, %xmm0
@@ -514,7 +514,7 @@ define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y) nounwind {
; SSE41-NEXT: psrld $27, %xmm2
; SSE41-NEXT: psrld $28, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2,3],xmm1[4,5,6,7]
-; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,32,1,1]
; SSE41-NEXT: por %xmm2, %xmm0
; SSE41-NEXT: retq
;
@@ -523,7 +523,7 @@ define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y) nounwind {
; AVX1-NEXT: vpsrld $27, %xmm1, %xmm2
; AVX1-NEXT: vpsrld $28, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5,6,7]
-; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [16,32,1,1]
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
@@ -598,9 +598,9 @@ define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y) nounwind {
; X86-SSE2-NEXT: psrld $27, %xmm2
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [16,32,1,1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2 # [32,u,1,u]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X86-SSE2-NEXT: por %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-fshr-128.ll b/llvm/test/CodeGen/X86/vector-fshr-128.ll
index d16b28a..33a6a76 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-128.ll
@@ -1741,9 +1741,9 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [268435456,134217728,67108864,33554432]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [134217728,u,33554432,u]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: por %xmm1, %xmm0
@@ -1761,7 +1761,7 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
; SSE41-NEXT: psrld $4, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
-; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [268435456,134217728,67108864,33554432]
; SSE41-NEXT: por %xmm2, %xmm0
; SSE41-NEXT: retq
;
@@ -1774,7 +1774,7 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
; AVX1-NEXT: vpsrld $4, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [268435456,134217728,67108864,33554432]
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
@@ -1856,9 +1856,9 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; X86-SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [268435456,134217728,67108864,33554432]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2 # [134217728,u,33554432,u]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X86-SSE2-NEXT: por %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-fshr-256.ll b/llvm/test/CodeGen/X86/vector-fshr-256.ll
index a387562..217431be 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-256.ll
@@ -1403,7 +1403,7 @@ define <8 x i32> @constant_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y) nounwind {
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
+; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [16777216,8388608,4194304,2097152]
; AVX1-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsrld $7, %xmm1, %xmm3
; AVX1-NEXT: vpsrld $5, %xmm1, %xmm4
@@ -1412,7 +1412,7 @@ define <8 x i32> @constant_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y) nounwind {
; AVX1-NEXT: vpsrld $4, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
-; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [268435456,134217728,67108864,33554432]
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
index 4969cb5..5d01dfd 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
@@ -1380,9 +1380,9 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x) nounwind {
; SSE2-LABEL: constant_funnnel_v4i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [268435456,134217728,67108864,33554432]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [134217728,u,33554432,u]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1394,8 +1394,8 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x) nounwind {
; SSE41-LABEL: constant_funnnel_v4i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [134217728,u,33554432,u]
+; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [268435456,134217728,67108864,33554432]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1406,8 +1406,8 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x) nounwind {
; AVX1-LABEL: constant_funnnel_v4i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [134217728,u,33554432,u]
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [268435456,134217728,67108864,33554432]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1472,9 +1472,9 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x) nounwind {
; X86-SSE2-LABEL: constant_funnnel_v4i32:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [268435456,134217728,67108864,33554432]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [134217728,u,33554432,u]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
index e2a3e26..4dc931d 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
@@ -1134,13 +1134,13 @@ define <8 x i32> @constant_funnnel_v8i32(<8 x i32> %x) nounwind {
; AVX1-LABEL: constant_funnnel_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [134217728,u,33554432,u]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [8388608,u,2097152,u]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [268435456,134217728,67108864,33554432]
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [16777216,8388608,4194304,2097152]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vmovshdup {{.*#+}} ymm2 = ymm0[1,1,3,3,5,5,7,7]
; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll
index ef5ffe4..4b42b18 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll
@@ -341,9 +341,9 @@ define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x) nounwind {
; SSE2-LABEL: constant_funnnel_v2i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [268435456,134217728,1,1]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [134217728,u,1,u]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -355,8 +355,8 @@ define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x) nounwind {
; SSE41-LABEL: constant_funnnel_v2i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [134217728,u,1,u]
+; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [268435456,134217728,1,1]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -367,8 +367,8 @@ define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x) nounwind {
; AVX1-LABEL: constant_funnnel_v2i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [134217728,u,1,u]
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [268435456,134217728,1,1]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -433,9 +433,9 @@ define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x) nounwind {
; X86-SSE2-LABEL: constant_funnnel_v2i32:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [268435456,134217728,1,1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [134217728,u,1,u]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
diff --git a/llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll b/llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll
index 816d5ca..e68d1d7 100644
--- a/llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll
+++ b/llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll
@@ -171,7 +171,7 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [37632,37632,37632,37632,37632,37632,37632,37632]
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147]
; SSE-NEXT: pmulhw %xmm3, %xmm2
; SSE-NEXT: psrlw $8, %xmm2
; SSE-NEXT: pxor %xmm4, %xmm4
@@ -193,7 +193,7 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [37632,37632,37632,37632,37632,37632,37632,37632]
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147]
; AVX1-NEXT: vpmulhw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@@ -260,11 +260,11 @@ define <16 x i8> @test_divconstant_16i8(<16 x i8> %a) nounwind {
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; SSE-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [37632,20224,11008,47872,26368,14592,14592,37632]
+; SSE-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [0,147,0,79,0,43,0,187,0,103,0,57,0,57,0,147]
; SSE-NEXT: psrlw $8, %xmm2
; SSE-NEXT: pxor %xmm3, %xmm3
; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [37632,33024,14592,26368,47872,11008,20224,37632]
+; SSE-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [0,147,0,129,0,57,0,103,0,187,0,43,0,79,0,147]
; SSE-NEXT: psrlw $8, %xmm3
; SSE-NEXT: packuswb %xmm2, %xmm3
; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -287,10 +287,10 @@ define <16 x i8> @test_divconstant_16i8(<16 x i8> %a) nounwind {
; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [37632,20224,11008,47872,26368,14592,14592,37632]
+; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [0,147,0,79,0,43,0,187,0,103,0,57,0,57,0,147]
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [37632,33024,14592,26368,47872,11008,20224,37632]
+; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [0,147,0,129,0,57,0,103,0,187,0,43,0,79,0,147]
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -561,7 +561,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [37632,37632,37632,37632,37632,37632,37632,37632]
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147]
; SSE-NEXT: pmulhw %xmm3, %xmm2
; SSE-NEXT: psrlw $8, %xmm2
; SSE-NEXT: pxor %xmm4, %xmm4
@@ -588,7 +588,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [37632,37632,37632,37632,37632,37632,37632,37632]
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147]
; AVX1-NEXT: vpmulhw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@@ -667,11 +667,11 @@ define <16 x i8> @test_remconstant_16i8(<16 x i8> %a) nounwind {
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; SSE2-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [37632,20224,11008,47872,26368,14592,14592,37632]
+; SSE2-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [0,147,0,79,0,43,0,187,0,103,0,57,0,57,0,147]
; SSE2-NEXT: psrlw $8, %xmm1
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [37632,33024,14592,26368,47872,11008,20224,37632]
+; SSE2-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [0,147,0,129,0,57,0,103,0,187,0,43,0,79,0,147]
; SSE2-NEXT: psrlw $8, %xmm3
; SSE2-NEXT: packuswb %xmm1, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,255,0,0,255,0,0,255,255,0,0,255,0,0,0,255]
@@ -706,11 +706,11 @@ define <16 x i8> @test_remconstant_16i8(<16 x i8> %a) nounwind {
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; SSE41-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [37632,20224,11008,47872,26368,14592,14592,37632]
+; SSE41-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [0,147,0,79,0,43,0,187,0,103,0,57,0,57,0,147]
; SSE41-NEXT: psrlw $8, %xmm2
; SSE41-NEXT: pxor %xmm3, %xmm3
; SSE41-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE41-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [37632,33024,14592,26368,47872,11008,20224,37632]
+; SSE41-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [0,147,0,129,0,57,0,103,0,187,0,43,0,79,0,147]
; SSE41-NEXT: psrlw $8, %xmm3
; SSE41-NEXT: packuswb %xmm2, %xmm3
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [255,255,0,0,255,0,0,255,255,0,0,255,0,0,0,255]
@@ -741,10 +741,10 @@ define <16 x i8> @test_remconstant_16i8(<16 x i8> %a) nounwind {
; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [37632,20224,11008,47872,26368,14592,14592,37632]
+; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [0,147,0,79,0,43,0,187,0,103,0,57,0,57,0,147]
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [37632,33024,14592,26368,47872,11008,20224,37632]
+; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [0,147,0,129,0,57,0,103,0,187,0,43,0,79,0,147]
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm3
@@ -1116,11 +1116,11 @@ define <16 x i8> @PR143238(<16 x i8> %a0) {
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; SSE-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [26368,47872,11008,20224,37632,35072,33024,30976]
+; SSE-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [0,103,0,187,0,43,0,79,0,147,0,137,0,129,0,121]
; SSE-NEXT: psrlw $8, %xmm2
; SSE-NEXT: pxor %xmm3, %xmm3
; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [33024,22016,33024,26368,11008,37632,33024,14592]
+; SSE-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [0,129,0,86,0,129,0,103,0,43,0,147,0,129,0,57]
; SSE-NEXT: psrlw $8, %xmm3
; SSE-NEXT: packuswb %xmm2, %xmm3
; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -1144,10 +1144,10 @@ define <16 x i8> @PR143238(<16 x i8> %a0) {
; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [26368,47872,11008,20224,37632,35072,33024,30976]
+; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [0,103,0,187,0,43,0,79,0,147,0,137,0,129,0,121]
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [33024,22016,33024,26368,11008,37632,33024,14592]
+; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [0,129,0,86,0,129,0,103,0,43,0,147,0,129,0,57]
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll b/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll
index 63c69e5..7355f36 100644
--- a/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll
+++ b/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll
@@ -161,7 +161,7 @@ define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [37632,37632,37632,37632,37632,37632,37632,37632]
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147]
; AVX1-NEXT: vpmulhw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
@@ -198,7 +198,7 @@ define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind {
; AVX2NOBW: # %bb.0:
; AVX2NOBW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2NOBW-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
-; AVX2NOBW-NEXT: vpbroadcastw {{.*#+}} ymm3 = [37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632]
+; AVX2NOBW-NEXT: vpbroadcastw {{.*#+}} ymm3 = [0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147]
; AVX2NOBW-NEXT: vpmulhw %ymm3, %ymm2, %ymm2
; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
@@ -245,10 +245,10 @@ define <32 x i8> @test_divconstant_32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
-; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [37632,20224,11008,47872,26368,14592,33024,37632]
+; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [0,147,0,79,0,43,0,187,0,103,0,57,0,129,0,147]
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [47872,12544,26368,6912,14592,30976,33024,35072]
+; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [0,187,0,49,0,103,0,27,0,57,0,121,0,129,0,137]
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm3
@@ -266,10 +266,10 @@ define <32 x i8> @test_divconstant_32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: vpcmpgtb %xmm2, %xmm1, %xmm4
; AVX1-NEXT: vpsubb %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [35072,33024,30976,14592,6912,26368,12544,47872]
+; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [0,137,0,129,0,121,0,57,0,27,0,103,0,49,0,187]
; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [37632,33024,14592,26368,47872,11008,20224,37632]
+; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,147,0,129,0,57,0,103,0,187,0,43,0,79,0,147]
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpaddb %xmm3, %xmm0, %xmm0
@@ -291,10 +291,10 @@ define <32 x i8> @test_divconstant_32i8(<32 x i8> %a) nounwind {
; AVX2NOBW: # %bb.0:
; AVX2NOBW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2NOBW-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
-; AVX2NOBW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [35072,33024,30976,14592,6912,26368,12544,47872,37632,20224,11008,47872,26368,14592,33024,37632]
+; AVX2NOBW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [0,137,0,129,0,121,0,57,0,27,0,103,0,49,0,187,0,147,0,79,0,43,0,187,0,103,0,57,0,129,0,147]
; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
-; AVX2NOBW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [37632,33024,14592,26368,47872,11008,20224,37632,47872,12544,26368,6912,14592,30976,33024,35072]
+; AVX2NOBW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [0,147,0,129,0,57,0,103,0,187,0,43,0,79,0,147,0,187,0,49,0,103,0,27,0,57,0,121,0,129,0,137]
; AVX2NOBW-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX2NOBW-NEXT: vpackuswb %ymm2, %ymm3, %ymm2
; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
@@ -539,7 +539,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [37632,37632,37632,37632,37632,37632,37632,37632]
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147]
; AVX1-NEXT: vpmulhw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -585,7 +585,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
; AVX2NOBW: # %bb.0:
; AVX2NOBW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2NOBW-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
-; AVX2NOBW-NEXT: vpbroadcastw {{.*#+}} ymm3 = [37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632]
+; AVX2NOBW-NEXT: vpbroadcastw {{.*#+}} ymm3 = [0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147]
; AVX2NOBW-NEXT: vpmulhw %ymm3, %ymm2, %ymm2
; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
@@ -640,10 +640,10 @@ define <32 x i8> @test_remconstant_32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
-; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [37632,20224,11008,47872,26368,14592,33024,37632]
+; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [0,147,0,79,0,43,0,187,0,103,0,57,0,129,0,147]
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [47872,12544,26368,6912,14592,30976,33024,35072]
+; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [0,187,0,49,0,103,0,27,0,57,0,121,0,129,0,137]
; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
; AVX1-NEXT: vpackuswb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm4
@@ -668,10 +668,10 @@ define <32 x i8> @test_remconstant_32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: vpor %xmm3, %xmm5, %xmm3
; AVX1-NEXT: vpsubb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [35072,33024,30976,14592,6912,26368,12544,47872]
+; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [0,137,0,129,0,121,0,57,0,27,0,103,0,49,0,187]
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5, %xmm5 # [37632,33024,14592,26368,47872,11008,20224,37632]
+; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5, %xmm5 # [0,147,0,129,0,57,0,103,0,187,0,43,0,79,0,147]
; AVX1-NEXT: vpsrlw $8, %xmm5, %xmm5
; AVX1-NEXT: vpackuswb %xmm3, %xmm5, %xmm3
; AVX1-NEXT: vpaddb %xmm4, %xmm3, %xmm3
@@ -699,10 +699,10 @@ define <32 x i8> @test_remconstant_32i8(<32 x i8> %a) nounwind {
; AVX2NOBW: # %bb.0:
; AVX2NOBW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2NOBW-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
-; AVX2NOBW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [35072,33024,30976,14592,6912,26368,12544,47872,37632,20224,11008,47872,26368,14592,33024,37632]
+; AVX2NOBW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [0,137,0,129,0,121,0,57,0,27,0,103,0,49,0,187,0,147,0,79,0,43,0,187,0,103,0,57,0,129,0,147]
; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
-; AVX2NOBW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [37632,33024,14592,26368,47872,11008,20224,37632,47872,12544,26368,6912,14592,30976,33024,35072]
+; AVX2NOBW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [0,147,0,129,0,57,0,103,0,187,0,43,0,79,0,147,0,187,0,49,0,103,0,27,0,57,0,121,0,129,0,137]
; AVX2NOBW-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX2NOBW-NEXT: vpackuswb %ymm2, %ymm3, %ymm2
; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm3
diff --git a/llvm/test/CodeGen/X86/vector-idiv-sdiv-512.ll b/llvm/test/CodeGen/X86/vector-idiv-sdiv-512.ll
index 6bc4fcb..5445330 100644
--- a/llvm/test/CodeGen/X86/vector-idiv-sdiv-512.ll
+++ b/llvm/test/CodeGen/X86/vector-idiv-sdiv-512.ll
@@ -132,7 +132,7 @@ define <64 x i8> @test_div7_64i8(<64 x i8> %a) nounwind {
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15],ymm2[24],ymm1[24],ymm2[25],ymm1[25],ymm2[26],ymm1[26],ymm2[27],ymm1[27],ymm2[28],ymm1[28],ymm2[29],ymm1[29],ymm2[30],ymm1[30],ymm2[31],ymm1[31]
-; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm4 = [37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632]
+; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm4 = [0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147]
; AVX512F-NEXT: vpmulhw %ymm4, %ymm3, %ymm3
; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[16],ymm1[16],ymm2[17],ymm1[17],ymm2[18],ymm1[18],ymm2[19],ymm1[19],ymm2[20],ymm1[20],ymm2[21],ymm1[21],ymm2[22],ymm1[22],ymm2[23],ymm1[23]
@@ -169,7 +169,7 @@ define <64 x i8> @test_div7_64i8(<64 x i8> %a) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
-; AVX512BW-NEXT: vpbroadcastw {{.*#+}} zmm3 = [37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632]
+; AVX512BW-NEXT: vpbroadcastw {{.*#+}} zmm3 = [0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147]
; AVX512BW-NEXT: vpmulhw %zmm3, %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
@@ -199,10 +199,10 @@ define <64 x i8> @test_divconstant_64i8(<64 x i8> %a) nounwind {
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15],ymm1[24],ymm2[24],ymm1[25],ymm2[25],ymm1[26],ymm2[26],ymm1[27],ymm2[27],ymm1[28],ymm2[28],ymm1[29],ymm2[29],ymm1[30],ymm2[30],ymm1[31],ymm2[31]
-; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [35072,18176,37632,4864,20224,10496,11008,45824,37632,20224,11008,47872,26368,14592,33024,37632]
+; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [0,137,0,71,0,147,0,19,0,79,0,41,0,43,0,179,0,147,0,79,0,43,0,187,0,103,0,57,0,129,0,147]
; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[16],ymm2[16],ymm1[17],ymm2[17],ymm1[18],ymm2[18],ymm1[19],ymm2[19],ymm1[20],ymm2[20],ymm1[21],ymm2[21],ymm1[22],ymm2[22],ymm1[23],ymm2[23]
-; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [6912,28416,14592,15104,30976,32000,33024,34048,47872,12544,26368,6912,14592,30976,33024,35072]
+; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [0,27,0,111,0,57,0,59,0,121,0,125,0,129,0,133,0,187,0,49,0,103,0,27,0,57,0,121,0,129,0,137]
; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512F-NEXT: vpackuswb %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm3
@@ -220,10 +220,10 @@ define <64 x i8> @test_divconstant_64i8(<64 x i8> %a) nounwind {
; AVX512F-NEXT: vpcmpgtb %ymm2, %ymm1, %ymm4
; AVX512F-NEXT: vpsubb %ymm4, %ymm2, %ymm2
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
-; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [35072,33024,30976,14592,6912,26368,12544,47872,34048,33024,32000,30976,15104,14592,28416,6912]
+; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [0,137,0,129,0,121,0,57,0,27,0,103,0,49,0,187,0,133,0,129,0,125,0,121,0,59,0,57,0,111,0,27]
; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
-; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [37632,33024,14592,26368,47872,11008,20224,37632,45824,11008,10496,20224,4864,37632,18176,35072]
+; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [0,147,0,129,0,57,0,103,0,187,0,43,0,79,0,147,0,179,0,43,0,41,0,79,0,19,0,147,0,71,0,137]
; AVX512F-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512F-NEXT: vpackuswb %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpaddb %ymm3, %ymm0, %ymm0
@@ -245,10 +245,10 @@ define <64 x i8> @test_divconstant_64i8(<64 x i8> %a) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
-; AVX512BW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2 # [35072,33024,30976,14592,6912,26368,12544,47872,34048,33024,32000,30976,15104,14592,28416,6912,35072,18176,37632,4864,20224,10496,11008,45824,37632,20224,11008,47872,26368,14592,33024,37632]
+; AVX512BW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2 # [0,137,0,129,0,121,0,57,0,27,0,103,0,49,0,187,0,133,0,129,0,125,0,121,0,59,0,57,0,111,0,27,0,137,0,71,0,147,0,19,0,79,0,41,0,43,0,179,0,147,0,79,0,43,0,187,0,103,0,57,0,129,0,147]
; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
-; AVX512BW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 # [37632,33024,14592,26368,47872,11008,20224,37632,45824,11008,10496,20224,4864,37632,18176,35072,6912,28416,14592,15104,30976,32000,33024,34048,47872,12544,26368,6912,14592,30976,33024,35072]
+; AVX512BW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 # [0,147,0,129,0,57,0,103,0,187,0,43,0,79,0,147,0,179,0,43,0,41,0,79,0,19,0,147,0,71,0,137,0,27,0,111,0,57,0,59,0,121,0,125,0,129,0,133,0,187,0,49,0,103,0,27,0,57,0,121,0,129,0,137]
; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
; AVX512BW-NEXT: vpackuswb %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
@@ -444,7 +444,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind {
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15],ymm1[24],ymm2[24],ymm1[25],ymm2[25],ymm1[26],ymm2[26],ymm1[27],ymm2[27],ymm1[28],ymm2[28],ymm1[29],ymm2[29],ymm1[30],ymm2[30],ymm1[31],ymm2[31]
-; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm4 = [37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632]
+; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm4 = [0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147]
; AVX512F-NEXT: vpmulhw %ymm4, %ymm3, %ymm3
; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[16],ymm2[16],ymm1[17],ymm2[17],ymm1[18],ymm2[18],ymm1[19],ymm2[19],ymm1[20],ymm2[20],ymm1[21],ymm2[21],ymm1[22],ymm2[22],ymm1[23],ymm2[23]
@@ -490,7 +490,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
-; AVX512BW-NEXT: vpbroadcastw {{.*#+}} zmm3 = [37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632]
+; AVX512BW-NEXT: vpbroadcastw {{.*#+}} zmm3 = [0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147]
; AVX512BW-NEXT: vpmulhw %zmm3, %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
@@ -524,10 +524,10 @@ define <64 x i8> @test_remconstant_64i8(<64 x i8> %a) nounwind {
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15],ymm1[24],ymm2[24],ymm1[25],ymm2[25],ymm1[26],ymm2[26],ymm1[27],ymm2[27],ymm1[28],ymm2[28],ymm1[29],ymm2[29],ymm1[30],ymm2[30],ymm1[31],ymm2[31]
-; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [35072,18176,37632,4864,20224,10496,11008,45824,37632,20224,11008,47872,26368,14592,33024,37632]
+; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [0,137,0,71,0,147,0,19,0,79,0,41,0,43,0,179,0,147,0,79,0,43,0,187,0,103,0,57,0,129,0,147]
; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[16],ymm2[16],ymm1[17],ymm2[17],ymm1[18],ymm2[18],ymm1[19],ymm2[19],ymm1[20],ymm2[20],ymm1[21],ymm2[21],ymm1[22],ymm2[22],ymm1[23],ymm2[23]
-; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [6912,28416,14592,15104,30976,32000,33024,34048,47872,12544,26368,6912,14592,30976,33024,35072]
+; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [0,27,0,111,0,57,0,59,0,121,0,125,0,129,0,133,0,187,0,49,0,103,0,27,0,57,0,121,0,129,0,137]
; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
; AVX512F-NEXT: vpackuswb %ymm3, %ymm4, %ymm3
; AVX512F-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm4
@@ -552,10 +552,10 @@ define <64 x i8> @test_remconstant_64i8(<64 x i8> %a) nounwind {
; AVX512F-NEXT: vpor %ymm3, %ymm5, %ymm3
; AVX512F-NEXT: vpsubb %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
-; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [35072,33024,30976,14592,6912,26368,12544,47872,34048,33024,32000,30976,15104,14592,28416,6912]
+; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [0,137,0,129,0,121,0,57,0,27,0,103,0,49,0,187,0,133,0,129,0,125,0,121,0,59,0,57,0,111,0,27]
; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
-; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5 # [37632,33024,14592,26368,47872,11008,20224,37632,45824,11008,10496,20224,4864,37632,18176,35072]
+; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5 # [0,147,0,129,0,57,0,103,0,187,0,43,0,79,0,147,0,179,0,43,0,41,0,79,0,19,0,147,0,71,0,137]
; AVX512F-NEXT: vpsrlw $8, %ymm5, %ymm5
; AVX512F-NEXT: vpackuswb %ymm3, %ymm5, %ymm3
; AVX512F-NEXT: vpaddb %ymm4, %ymm3, %ymm3
@@ -583,10 +583,10 @@ define <64 x i8> @test_remconstant_64i8(<64 x i8> %a) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
-; AVX512BW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2 # [35072,33024,30976,14592,6912,26368,12544,47872,34048,33024,32000,30976,15104,14592,28416,6912,35072,18176,37632,4864,20224,10496,11008,45824,37632,20224,11008,47872,26368,14592,33024,37632]
+; AVX512BW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2 # [0,137,0,129,0,121,0,57,0,27,0,103,0,49,0,187,0,133,0,129,0,125,0,121,0,59,0,57,0,111,0,27,0,137,0,71,0,147,0,19,0,79,0,41,0,43,0,179,0,147,0,79,0,43,0,187,0,103,0,57,0,129,0,147]
; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
-; AVX512BW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 # [37632,33024,14592,26368,47872,11008,20224,37632,45824,11008,10496,20224,4864,37632,18176,35072,6912,28416,14592,15104,30976,32000,33024,34048,47872,12544,26368,6912,14592,30976,33024,35072]
+; AVX512BW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 # [0,147,0,129,0,57,0,103,0,187,0,43,0,79,0,147,0,179,0,43,0,41,0,79,0,19,0,147,0,71,0,137,0,27,0,111,0,57,0,59,0,121,0,125,0,129,0,133,0,187,0,49,0,103,0,27,0,57,0,121,0,129,0,137]
; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
; AVX512BW-NEXT: vpackuswb %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll
index 33d80f6..6cd5098 100644
--- a/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll
+++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll
@@ -169,7 +169,7 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [37,37,37,37,37,37,37,37]
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
; SSE2-NEXT: pmullw %xmm3, %xmm2
; SSE2-NEXT: psrlw $8, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm4
@@ -209,7 +209,7 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [37,37,37,37,37,37,37,37]
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
@@ -270,22 +270,22 @@ define <16 x i8> @test_divconstant_16i8(<16 x i8> %a) nounwind {
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [128,256,256,256,256,256,256,256]
; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [147,79,171,117,205,57,57,37]
+; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [147,0,79,0,171,0,117,0,205,0,57,0,57,0,37,0]
; SSE2-NEXT: psrlw $8, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [256,256,256,256,256,256,256,128]
; SSE2-NEXT: psrlw $8, %xmm3
-; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [37,32,57,205,117,171,79,147]
+; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0]
; SSE2-NEXT: psrlw $8, %xmm3
; SSE2-NEXT: packuswb %xmm2, %xmm3
; SSE2-NEXT: psubb %xmm3, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [0,0,0,128,0,0,0,128]
+; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
; SSE2-NEXT: psrlw $8, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [128,0,0,0,128,0,0,0]
+; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0]
; SSE2-NEXT: psrlw $8, %xmm0
; SSE2-NEXT: packuswb %xmm2, %xmm0
; SSE2-NEXT: paddb %xmm3, %xmm0
@@ -309,7 +309,7 @@ define <16 x i8> @test_divconstant_16i8(<16 x i8> %a) nounwind {
; SSE41-NEXT: psllw $7, %xmm3
; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm2[1,2,3,4,5,6,7]
; SSE41-NEXT: psrlw $8, %xmm3
-; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [147,79,171,117,205,57,57,37]
+; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [147,0,79,0,171,0,117,0,205,0,57,0,57,0,37,0]
; SSE41-NEXT: psrlw $8, %xmm3
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
@@ -317,15 +317,15 @@ define <16 x i8> @test_divconstant_16i8(<16 x i8> %a) nounwind {
; SSE41-NEXT: psllw $7, %xmm4
; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm2[0,1,2,3,4,5,6],xmm4[7]
; SSE41-NEXT: psrlw $8, %xmm4
-; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 # [37,32,57,205,117,171,79,147]
+; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0]
; SSE41-NEXT: psrlw $8, %xmm4
; SSE41-NEXT: packuswb %xmm3, %xmm4
; SSE41-NEXT: psubb %xmm4, %xmm0
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,0,0,128,0,0,0,128]
+; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
; SSE41-NEXT: psrlw $8, %xmm0
-; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [128,0,0,0,128,0,0,0]
+; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0]
; SSE41-NEXT: psrlw $8, %xmm2
; SSE41-NEXT: packuswb %xmm0, %xmm2
; SSE41-NEXT: paddb %xmm4, %xmm2
@@ -346,22 +346,22 @@ define <16 x i8> @test_divconstant_16i8(<16 x i8> %a) nounwind {
; AVX1-NEXT: vpsllw $7, %xmm3, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1,2,3,4,5,6,7]
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [147,79,171,117,205,57,57,37]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [147,0,79,0,171,0,117,0,205,0,57,0,57,0,37,0]
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpsllw $7, %xmm4, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6],xmm4[7]
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [37,32,57,205,117,171,79,147]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0]
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsubb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [0,0,0,128,0,0,0,128]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [128,0,0,0,128,0,0,0]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0]
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpaddb %xmm2, %xmm0, %xmm0
@@ -638,7 +638,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [37,37,37,37,37,37,37,37]
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
; SSE2-NEXT: pmullw %xmm3, %xmm2
; SSE2-NEXT: psrlw $8, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm4
@@ -690,7 +690,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [37,37,37,37,37,37,37,37]
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
@@ -763,23 +763,23 @@ define <16 x i8> @test_remconstant_16i8(<16 x i8> %a) nounwind {
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [128,256,256,256,256,256,256,256]
; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [147,79,171,117,205,57,57,37]
+; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [147,0,79,0,171,0,117,0,205,0,57,0,57,0,37,0]
; SSE2-NEXT: psrlw $8, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [256,256,256,256,256,256,256,128]
; SSE2-NEXT: psrlw $8, %xmm3
-; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [37,32,57,205,117,171,79,147]
+; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0]
; SSE2-NEXT: psrlw $8, %xmm3
; SSE2-NEXT: packuswb %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psubb %xmm3, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
-; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 # [0,0,0,128,0,0,0,128]
+; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 # [0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
; SSE2-NEXT: psrlw $8, %xmm4
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [128,0,0,0,128,0,0,0]
+; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0]
; SSE2-NEXT: psrlw $8, %xmm2
; SSE2-NEXT: packuswb %xmm4, %xmm2
; SSE2-NEXT: paddb %xmm3, %xmm2
@@ -809,7 +809,7 @@ define <16 x i8> @test_remconstant_16i8(<16 x i8> %a) nounwind {
; SSE41-NEXT: psllw $7, %xmm3
; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm2[1,2,3,4,5,6,7]
; SSE41-NEXT: psrlw $8, %xmm3
-; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [147,79,171,117,205,57,57,37]
+; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [147,0,79,0,171,0,117,0,205,0,57,0,57,0,37,0]
; SSE41-NEXT: psrlw $8, %xmm3
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
@@ -817,16 +817,16 @@ define <16 x i8> @test_remconstant_16i8(<16 x i8> %a) nounwind {
; SSE41-NEXT: psllw $7, %xmm4
; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm2[0,1,2,3,4,5,6],xmm4[7]
; SSE41-NEXT: psrlw $8, %xmm4
-; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 # [37,32,57,205,117,171,79,147]
+; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0]
; SSE41-NEXT: psrlw $8, %xmm4
; SSE41-NEXT: packuswb %xmm3, %xmm4
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psubb %xmm4, %xmm2
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [0,0,0,128,0,0,0,128]
+; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
; SSE41-NEXT: psrlw $8, %xmm2
-; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [128,0,0,0,128,0,0,0]
+; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0]
; SSE41-NEXT: psrlw $8, %xmm3
; SSE41-NEXT: packuswb %xmm2, %xmm3
; SSE41-NEXT: paddb %xmm4, %xmm3
@@ -854,22 +854,22 @@ define <16 x i8> @test_remconstant_16i8(<16 x i8> %a) nounwind {
; AVX1-NEXT: vpsllw $7, %xmm3, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1,2,3,4,5,6,7]
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [147,79,171,117,205,57,57,37]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [147,0,79,0,171,0,117,0,205,0,57,0,57,0,37,0]
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpsllw $7, %xmm4, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6],xmm4[7]
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [37,32,57,205,117,171,79,147]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0]
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsubb %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [0,0,0,128,0,0,0,128]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [128,0,0,0,128,0,0,0]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0]
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpackuswb %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpaddb %xmm2, %xmm3, %xmm2
diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll
index e43108f..98ea87c 100644
--- a/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll
+++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll
@@ -166,7 +166,7 @@ define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind {
; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [37,37,37,37,37,37,37,37]
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
@@ -200,7 +200,7 @@ define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind {
; AVX2NOBW: # %bb.0:
; AVX2NOBW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2NOBW-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX2NOBW-NEXT: vpbroadcastw {{.*#+}} ymm3 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
+; AVX2NOBW-NEXT: vpbroadcastw {{.*#+}} ymm3 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
; AVX2NOBW-NEXT: vpmullw %ymm3, %ymm2, %ymm2
; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
@@ -246,22 +246,22 @@ define <32 x i8> @test_divconstant_32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: vpsllw $7, %xmm4, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1,2,3,4,5,6,7]
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [147,79,171,117,205,57,32,37]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [147,0,79,0,171,0,117,0,205,0,57,0,32,0,37,0]
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX1-NEXT: vpsllw $7, %xmm5, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0],xmm4[1,2,3,4,5,6,7]
; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [187,135,205,27,57,241,16,137]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [187,0,135,0,205,0,27,0,57,0,241,0,16,0,137,0]
; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
; AVX1-NEXT: vpackuswb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsubb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [0,0,0,128,0,0,0,128]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [0,128,0,0,0,0,0,0]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0]
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpackuswb %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpaddb %xmm3, %xmm2, %xmm2
@@ -276,22 +276,22 @@ define <32 x i8> @test_divconstant_32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6],xmm4[7]
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [137,16,241,57,27,205,135,187]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [137,0,16,0,241,0,57,0,27,0,205,0,135,0,187,0]
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpsllw $7, %xmm5, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,6],xmm5[7]
; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [37,32,57,205,117,171,79,147]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0]
; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
; AVX1-NEXT: vpackuswb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsubb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [0,0,0,0,0,0,128,0]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0]
; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [128,0,0,0,128,0,0,0]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0]
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpaddb %xmm3, %xmm0, %xmm0
@@ -312,20 +312,20 @@ define <32 x i8> @test_divconstant_32i8(<32 x i8> %a) nounwind {
; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm3 = [256,256,256,256,256,256,256,128,128,256,256,256,256,256,256,256]
; AVX2NOBW-NEXT: vpmullw %ymm3, %ymm2, %ymm2
; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [137,16,241,57,27,205,135,187,147,79,171,117,205,57,32,37]
+; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [137,0,16,0,241,0,57,0,27,0,205,0,135,0,187,0,147,0,79,0,171,0,117,0,205,0,57,0,32,0,37,0]
; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; AVX2NOBW-NEXT: vpmullw %ymm3, %ymm4, %ymm3
; AVX2NOBW-NEXT: vpsrlw $8, %ymm3, %ymm3
-; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [37,32,57,205,117,171,79,147,187,135,205,27,57,241,16,137]
+; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0,187,0,135,0,205,0,27,0,57,0,241,0,16,0,137,0]
; AVX2NOBW-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX2NOBW-NEXT: vpackuswb %ymm2, %ymm3, %ymm2
; AVX2NOBW-NEXT: vpsubb %ymm2, %ymm0, %ymm0
; AVX2NOBW-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [0,0,0,0,0,0,128,0,0,0,0,128,0,0,0,128]
+; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
; AVX2NOBW-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [128,0,0,0,128,0,0,0,0,128,0,0,0,0,0,0]
+; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0]
; AVX2NOBW-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX2NOBW-NEXT: vpackuswb %ymm3, %ymm0, %ymm0
; AVX2NOBW-NEXT: vpaddb %ymm2, %ymm0, %ymm0
@@ -578,7 +578,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [37,37,37,37,37,37,37,37]
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
; AVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
@@ -622,7 +622,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
; AVX2NOBW: # %bb.0:
; AVX2NOBW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2NOBW-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX2NOBW-NEXT: vpbroadcastw {{.*#+}} ymm3 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
+; AVX2NOBW-NEXT: vpbroadcastw {{.*#+}} ymm3 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
; AVX2NOBW-NEXT: vpmullw %ymm3, %ymm2, %ymm2
; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
@@ -676,22 +676,22 @@ define <32 x i8> @test_remconstant_32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: vpsllw $7, %xmm4, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1,2,3,4,5,6,7]
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [147,79,171,117,205,57,32,37]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [147,0,79,0,171,0,117,0,205,0,57,0,32,0,37,0]
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX1-NEXT: vpsllw $7, %xmm5, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0],xmm4[1,2,3,4,5,6,7]
; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [187,135,205,27,57,241,16,137]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [187,0,135,0,205,0,27,0,57,0,241,0,16,0,137,0]
; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
; AVX1-NEXT: vpackuswb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsubb %xmm3, %xmm2, %xmm4
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5, %xmm5 # [0,0,0,128,0,0,0,128]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5, %xmm5 # [0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
; AVX1-NEXT: vpsrlw $8, %xmm5, %xmm5
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [0,128,0,0,0,0,0,0]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0]
; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
; AVX1-NEXT: vpackuswb %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpaddb %xmm3, %xmm4, %xmm3
@@ -713,22 +713,22 @@ define <32 x i8> @test_remconstant_32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,6],xmm5[7]
; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [137,16,241,57,27,205,135,187]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [137,0,16,0,241,0,57,0,27,0,205,0,135,0,187,0]
; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm6 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpsllw $7, %xmm6, %xmm6
; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,6],xmm6[7]
; AVX1-NEXT: vpsrlw $8, %xmm5, %xmm5
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5, %xmm5 # [37,32,57,205,117,171,79,147]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5, %xmm5 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0]
; AVX1-NEXT: vpsrlw $8, %xmm5, %xmm5
; AVX1-NEXT: vpackuswb %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpsubb %xmm4, %xmm0, %xmm5
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm5[8],xmm1[8],xmm5[9],xmm1[9],xmm5[10],xmm1[10],xmm5[11],xmm1[11],xmm5[12],xmm1[12],xmm5[13],xmm1[13],xmm5[14],xmm1[14],xmm5[15],xmm1[15]
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6, %xmm6 # [0,0,0,0,0,0,128,0]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6, %xmm6 # [0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0]
; AVX1-NEXT: vpsrlw $8, %xmm6, %xmm6
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5, %xmm5 # [128,0,0,0,128,0,0,0]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5, %xmm5 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0]
; AVX1-NEXT: vpsrlw $8, %xmm5, %xmm5
; AVX1-NEXT: vpackuswb %xmm6, %xmm5, %xmm5
; AVX1-NEXT: vpaddb %xmm4, %xmm5, %xmm4
@@ -755,20 +755,20 @@ define <32 x i8> @test_remconstant_32i8(<32 x i8> %a) nounwind {
; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm3 = [256,256,256,256,256,256,256,128,128,256,256,256,256,256,256,256]
; AVX2NOBW-NEXT: vpmullw %ymm3, %ymm2, %ymm2
; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [137,16,241,57,27,205,135,187,147,79,171,117,205,57,32,37]
+; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [137,0,16,0,241,0,57,0,27,0,205,0,135,0,187,0,147,0,79,0,171,0,117,0,205,0,57,0,32,0,37,0]
; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; AVX2NOBW-NEXT: vpmullw %ymm3, %ymm4, %ymm3
; AVX2NOBW-NEXT: vpsrlw $8, %ymm3, %ymm3
-; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [37,32,57,205,117,171,79,147,187,135,205,27,57,241,16,137]
+; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0,187,0,135,0,205,0,27,0,57,0,241,0,16,0,137,0]
; AVX2NOBW-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX2NOBW-NEXT: vpackuswb %ymm2, %ymm3, %ymm2
; AVX2NOBW-NEXT: vpsubb %ymm2, %ymm0, %ymm3
; AVX2NOBW-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm3[8],ymm1[8],ymm3[9],ymm1[9],ymm3[10],ymm1[10],ymm3[11],ymm1[11],ymm3[12],ymm1[12],ymm3[13],ymm1[13],ymm3[14],ymm1[14],ymm3[15],ymm1[15],ymm3[24],ymm1[24],ymm3[25],ymm1[25],ymm3[26],ymm1[26],ymm3[27],ymm1[27],ymm3[28],ymm1[28],ymm3[29],ymm1[29],ymm3[30],ymm1[30],ymm3[31],ymm1[31]
-; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [0,0,0,0,0,0,128,0,0,0,0,128,0,0,0,128]
+; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
; AVX2NOBW-NEXT: vpsrlw $8, %ymm4, %ymm4
; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm1[0],ymm3[1],ymm1[1],ymm3[2],ymm1[2],ymm3[3],ymm1[3],ymm3[4],ymm1[4],ymm3[5],ymm1[5],ymm3[6],ymm1[6],ymm3[7],ymm1[7],ymm3[16],ymm1[16],ymm3[17],ymm1[17],ymm3[18],ymm1[18],ymm3[19],ymm1[19],ymm3[20],ymm1[20],ymm3[21],ymm1[21],ymm3[22],ymm1[22],ymm3[23],ymm1[23]
-; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [128,0,0,0,128,0,0,0,0,128,0,0,0,0,0,0]
+; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0]
; AVX2NOBW-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX2NOBW-NEXT: vpackuswb %ymm4, %ymm3, %ymm3
; AVX2NOBW-NEXT: vpaddb %ymm2, %ymm3, %ymm2
diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll
index bf98bcc..a11fa370 100644
--- a/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll
+++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll
@@ -135,7 +135,7 @@ define <64 x i8> @test_div7_64i8(<64 x i8> %a) nounwind {
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm3 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
+; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm3 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
; AVX512F-NEXT: vpmullw %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
@@ -169,7 +169,7 @@ define <64 x i8> @test_div7_64i8(<64 x i8> %a) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
-; AVX512BW-NEXT: vpbroadcastw {{.*#+}} zmm3 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
+; AVX512BW-NEXT: vpbroadcastw {{.*#+}} zmm3 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
; AVX512BW-NEXT: vpmullw %zmm3, %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
@@ -199,20 +199,20 @@ define <64 x i8> @test_divconstant_64i8(<64 x i8> %a) nounwind {
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15],ymm2[24],ymm1[24],ymm2[25],ymm1[25],ymm2[26],ymm1[26],ymm2[27],ymm1[27],ymm2[28],ymm1[28],ymm2[29],ymm1[29],ymm2[30],ymm1[30],ymm2[31],ymm1[31]
; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [256,256,64,256,256,256,256,256,128,256,256,256,256,256,256,256]
; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [137,27,37,19,79,41,171,101,147,79,171,117,205,57,32,37]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [137,0,27,0,37,0,19,0,79,0,41,0,171,0,101,0,147,0,79,0,171,0,117,0,205,0,57,0,32,0,37,0]
; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[16],ymm1[16],ymm2[17],ymm1[17],ymm2[18],ymm1[18],ymm2[19],ymm1[19],ymm2[20],ymm1[20],ymm2[21],ymm1[21],ymm2[22],ymm1[22],ymm2[23],ymm1[23]
; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [256,256,256,256,256,256,256,256,128,256,256,256,256,256,256,256]
; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [27,111,57,235,241,249,8,9,187,135,205,27,57,241,16,137]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [27,0,111,0,57,0,235,0,241,0,249,0,8,0,9,0,187,0,135,0,205,0,27,0,57,0,241,0,16,0,137,0]
; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
; AVX512F-NEXT: vpackuswb %ymm3, %ymm4, %ymm3
; AVX512F-NEXT: vpsubb %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15],ymm2[24],ymm1[24],ymm2[25],ymm1[25],ymm2[26],ymm1[26],ymm2[27],ymm1[27],ymm2[28],ymm1[28],ymm2[29],ymm1[29],ymm2[30],ymm1[30],ymm2[31],ymm1[31]
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [0,128,0,0,0,0,0,128,0,0,0,128,0,0,0,128]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [0,0,128,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[16],ymm1[16],ymm2[17],ymm1[17],ymm2[18],ymm1[18],ymm2[19],ymm1[19],ymm2[20],ymm1[20],ymm2[21],ymm1[21],ymm2[22],ymm1[22],ymm2[23],ymm1[23]
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [0,0,0,0,0,0,0,128,0,128,0,0,0,0,0,0]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [0,0,0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0]
; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512F-NEXT: vpackuswb %ymm4, %ymm2, %ymm2
; AVX512F-NEXT: vpaddb %ymm3, %ymm2, %ymm2
@@ -226,20 +226,20 @@ define <64 x i8> @test_divconstant_64i8(<64 x i8> %a) nounwind {
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [256,256,256,256,256,256,256,128,256,256,256,256,256,256,256,256]
; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [137,16,241,57,27,205,135,187,9,8,249,241,235,57,111,27]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [137,0,16,0,241,0,57,0,27,0,205,0,135,0,187,0,9,0,8,0,249,0,241,0,235,0,57,0,111,0,27,0]
; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [256,256,256,256,256,256,256,128,256,256,256,256,256,64,256,256]
; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [37,32,57,205,117,171,79,147,101,171,41,79,19,37,27,137]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0,101,0,171,0,41,0,79,0,19,0,37,0,27,0,137,0]
; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
; AVX512F-NEXT: vpackuswb %ymm3, %ymm4, %ymm3
; AVX512F-NEXT: vpsubb %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [0,0,0,0,0,0,128,0,128,0,0,0,0,0,0,0]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [128,0,0,0,128,0,0,0,128,0,0,0,0,0,128,0]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0]
; AVX512F-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512F-NEXT: vpackuswb %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpaddb %ymm3, %ymm0, %ymm0
@@ -259,20 +259,20 @@ define <64 x i8> @test_divconstant_64i8(<64 x i8> %a) nounwind {
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
; AVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
-; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2 # [137,16,241,57,27,205,135,187,9,8,249,241,235,57,111,27,137,27,37,19,79,41,171,101,147,79,171,117,205,57,32,37]
+; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2 # [137,0,16,0,241,0,57,0,27,0,205,0,135,0,187,0,9,0,8,0,249,0,241,0,235,0,57,0,111,0,27,0,137,0,27,0,37,0,19,0,79,0,41,0,171,0,101,0,147,0,79,0,171,0,117,0,205,0,57,0,32,0,37,0]
; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
; AVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3
; AVX512BW-NEXT: vpsrlw $8, %zmm3, %zmm3
-; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3 # [37,32,57,205,117,171,79,147,101,171,41,79,19,37,27,137,27,111,57,235,241,249,8,9,187,135,205,27,57,241,16,137]
+; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0,101,0,171,0,41,0,79,0,19,0,37,0,27,0,137,0,27,0,111,0,57,0,235,0,241,0,249,0,8,0,9,0,187,0,135,0,205,0,27,0,57,0,241,0,16,0,137,0]
; AVX512BW-NEXT: vpsrlw $8, %zmm3, %zmm3
; AVX512BW-NEXT: vpackuswb %zmm2, %zmm3, %zmm2
; AVX512BW-NEXT: vpsubb %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
-; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3 # [0,0,0,0,0,0,128,0,128,0,0,0,0,0,0,0,0,128,0,0,0,0,0,128,0,0,0,128,0,0,0,128]
+; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3 # [0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
; AVX512BW-NEXT: vpsrlw $8, %zmm3, %zmm3
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
-; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 # [128,0,0,0,128,0,0,0,128,0,0,0,0,0,128,0,0,0,0,0,0,0,0,128,0,128,0,0,0,0,0,0]
+; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0]
; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0
; AVX512BW-NEXT: vpackuswb %zmm3, %zmm0, %zmm0
; AVX512BW-NEXT: vpaddb %zmm2, %zmm0, %zmm0
@@ -473,7 +473,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind {
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15],ymm1[24],ymm2[24],ymm1[25],ymm2[25],ymm1[26],ymm2[26],ymm1[27],ymm2[27],ymm1[28],ymm2[28],ymm1[29],ymm2[29],ymm1[30],ymm2[30],ymm1[31],ymm2[31]
-; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm4 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
+; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm4 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
; AVX512F-NEXT: vpmullw %ymm4, %ymm3, %ymm3
; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[16],ymm2[16],ymm1[17],ymm2[17],ymm1[18],ymm2[18],ymm1[19],ymm2[19],ymm1[20],ymm2[20],ymm1[21],ymm2[21],ymm1[22],ymm2[22],ymm1[23],ymm2[23]
@@ -517,7 +517,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
-; AVX512BW-NEXT: vpbroadcastw {{.*#+}} zmm3 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
+; AVX512BW-NEXT: vpbroadcastw {{.*#+}} zmm3 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
; AVX512BW-NEXT: vpmullw %zmm3, %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
@@ -551,20 +551,20 @@ define <64 x i8> @test_remconstant_64i8(<64 x i8> %a) nounwind {
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15],ymm2[24],ymm1[24],ymm2[25],ymm1[25],ymm2[26],ymm1[26],ymm2[27],ymm1[27],ymm2[28],ymm1[28],ymm2[29],ymm1[29],ymm2[30],ymm1[30],ymm2[31],ymm1[31]
; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [256,256,64,256,256,256,256,256,128,256,256,256,256,256,256,256]
; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [137,27,37,19,79,41,171,101,147,79,171,117,205,57,32,37]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [137,0,27,0,37,0,19,0,79,0,41,0,171,0,101,0,147,0,79,0,171,0,117,0,205,0,57,0,32,0,37,0]
; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[16],ymm1[16],ymm2[17],ymm1[17],ymm2[18],ymm1[18],ymm2[19],ymm1[19],ymm2[20],ymm1[20],ymm2[21],ymm1[21],ymm2[22],ymm1[22],ymm2[23],ymm1[23]
; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [256,256,256,256,256,256,256,256,128,256,256,256,256,256,256,256]
; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [27,111,57,235,241,249,8,9,187,135,205,27,57,241,16,137]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [27,0,111,0,57,0,235,0,241,0,249,0,8,0,9,0,187,0,135,0,205,0,27,0,57,0,241,0,16,0,137,0]
; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
; AVX512F-NEXT: vpackuswb %ymm3, %ymm4, %ymm3
; AVX512F-NEXT: vpsubb %ymm3, %ymm2, %ymm4
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm4[8],ymm1[8],ymm4[9],ymm1[9],ymm4[10],ymm1[10],ymm4[11],ymm1[11],ymm4[12],ymm1[12],ymm4[13],ymm1[13],ymm4[14],ymm1[14],ymm4[15],ymm1[15],ymm4[24],ymm1[24],ymm4[25],ymm1[25],ymm4[26],ymm1[26],ymm4[27],ymm1[27],ymm4[28],ymm1[28],ymm4[29],ymm1[29],ymm4[30],ymm1[30],ymm4[31],ymm1[31]
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5 # [0,128,0,0,0,0,0,128,0,0,0,128,0,0,0,128]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5 # [0,0,128,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
; AVX512F-NEXT: vpsrlw $8, %ymm5, %ymm5
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[4],ymm1[4],ymm4[5],ymm1[5],ymm4[6],ymm1[6],ymm4[7],ymm1[7],ymm4[16],ymm1[16],ymm4[17],ymm1[17],ymm4[18],ymm1[18],ymm4[19],ymm1[19],ymm4[20],ymm1[20],ymm4[21],ymm1[21],ymm4[22],ymm1[22],ymm4[23],ymm1[23]
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [0,0,0,0,0,0,0,128,0,128,0,0,0,0,0,0]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [0,0,0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0]
; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
; AVX512F-NEXT: vpackuswb %ymm5, %ymm4, %ymm4
; AVX512F-NEXT: vpaddb %ymm3, %ymm4, %ymm3
@@ -585,20 +585,20 @@ define <64 x i8> @test_remconstant_64i8(<64 x i8> %a) nounwind {
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [256,256,256,256,256,256,256,128,256,256,256,256,256,256,256,256]
; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [137,16,241,57,27,205,135,187,9,8,249,241,235,57,111,27]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [137,0,16,0,241,0,57,0,27,0,205,0,135,0,187,0,9,0,8,0,249,0,241,0,235,0,57,0,111,0,27,0]
; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5 # [256,256,256,256,256,256,256,128,256,256,256,256,256,64,256,256]
; AVX512F-NEXT: vpsrlw $8, %ymm5, %ymm5
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5 # [37,32,57,205,117,171,79,147,101,171,41,79,19,37,27,137]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0,101,0,171,0,41,0,79,0,19,0,37,0,27,0,137,0]
; AVX512F-NEXT: vpsrlw $8, %ymm5, %ymm5
; AVX512F-NEXT: vpackuswb %ymm4, %ymm5, %ymm4
; AVX512F-NEXT: vpsubb %ymm4, %ymm0, %ymm5
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm5[8],ymm1[8],ymm5[9],ymm1[9],ymm5[10],ymm1[10],ymm5[11],ymm1[11],ymm5[12],ymm1[12],ymm5[13],ymm1[13],ymm5[14],ymm1[14],ymm5[15],ymm1[15],ymm5[24],ymm1[24],ymm5[25],ymm1[25],ymm5[26],ymm1[26],ymm5[27],ymm1[27],ymm5[28],ymm1[28],ymm5[29],ymm1[29],ymm5[30],ymm1[30],ymm5[31],ymm1[31]
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm6 # [0,0,0,0,0,0,128,0,128,0,0,0,0,0,0,0]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm6 # [0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
; AVX512F-NEXT: vpsrlw $8, %ymm6, %ymm6
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm1[0],ymm5[1],ymm1[1],ymm5[2],ymm1[2],ymm5[3],ymm1[3],ymm5[4],ymm1[4],ymm5[5],ymm1[5],ymm5[6],ymm1[6],ymm5[7],ymm1[7],ymm5[16],ymm1[16],ymm5[17],ymm1[17],ymm5[18],ymm1[18],ymm5[19],ymm1[19],ymm5[20],ymm1[20],ymm5[21],ymm1[21],ymm5[22],ymm1[22],ymm5[23],ymm1[23]
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5 # [128,0,0,0,128,0,0,0,128,0,0,0,0,0,128,0]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0]
; AVX512F-NEXT: vpsrlw $8, %ymm5, %ymm5
; AVX512F-NEXT: vpackuswb %ymm6, %ymm5, %ymm5
; AVX512F-NEXT: vpaddb %ymm4, %ymm5, %ymm4
@@ -624,20 +624,20 @@ define <64 x i8> @test_remconstant_64i8(<64 x i8> %a) nounwind {
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
; AVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
-; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2 # [137,16,241,57,27,205,135,187,9,8,249,241,235,57,111,27,137,27,37,19,79,41,171,101,147,79,171,117,205,57,32,37]
+; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2 # [137,0,16,0,241,0,57,0,27,0,205,0,135,0,187,0,9,0,8,0,249,0,241,0,235,0,57,0,111,0,27,0,137,0,27,0,37,0,19,0,79,0,41,0,171,0,101,0,147,0,79,0,171,0,117,0,205,0,57,0,32,0,37,0]
; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
; AVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3
; AVX512BW-NEXT: vpsrlw $8, %zmm3, %zmm3
-; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3 # [37,32,57,205,117,171,79,147,101,171,41,79,19,37,27,137,27,111,57,235,241,249,8,9,187,135,205,27,57,241,16,137]
+; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0,101,0,171,0,41,0,79,0,19,0,37,0,27,0,137,0,27,0,111,0,57,0,235,0,241,0,249,0,8,0,9,0,187,0,135,0,205,0,27,0,57,0,241,0,16,0,137,0]
; AVX512BW-NEXT: vpsrlw $8, %zmm3, %zmm3
; AVX512BW-NEXT: vpackuswb %zmm2, %zmm3, %zmm2
; AVX512BW-NEXT: vpsubb %zmm2, %zmm0, %zmm3
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm4 = zmm3[8],zmm1[8],zmm3[9],zmm1[9],zmm3[10],zmm1[10],zmm3[11],zmm1[11],zmm3[12],zmm1[12],zmm3[13],zmm1[13],zmm3[14],zmm1[14],zmm3[15],zmm1[15],zmm3[24],zmm1[24],zmm3[25],zmm1[25],zmm3[26],zmm1[26],zmm3[27],zmm1[27],zmm3[28],zmm1[28],zmm3[29],zmm1[29],zmm3[30],zmm1[30],zmm3[31],zmm1[31],zmm3[40],zmm1[40],zmm3[41],zmm1[41],zmm3[42],zmm1[42],zmm3[43],zmm1[43],zmm3[44],zmm1[44],zmm3[45],zmm1[45],zmm3[46],zmm1[46],zmm3[47],zmm1[47],zmm3[56],zmm1[56],zmm3[57],zmm1[57],zmm3[58],zmm1[58],zmm3[59],zmm1[59],zmm3[60],zmm1[60],zmm3[61],zmm1[61],zmm3[62],zmm1[62],zmm3[63],zmm1[63]
-; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm4 # [0,0,0,0,0,0,128,0,128,0,0,0,0,0,0,0,0,128,0,0,0,0,0,128,0,0,0,128,0,0,0,128]
+; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm4 # [0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
; AVX512BW-NEXT: vpsrlw $8, %zmm4, %zmm4
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm3[0],zmm1[0],zmm3[1],zmm1[1],zmm3[2],zmm1[2],zmm3[3],zmm1[3],zmm3[4],zmm1[4],zmm3[5],zmm1[5],zmm3[6],zmm1[6],zmm3[7],zmm1[7],zmm3[16],zmm1[16],zmm3[17],zmm1[17],zmm3[18],zmm1[18],zmm3[19],zmm1[19],zmm3[20],zmm1[20],zmm3[21],zmm1[21],zmm3[22],zmm1[22],zmm3[23],zmm1[23],zmm3[32],zmm1[32],zmm3[33],zmm1[33],zmm3[34],zmm1[34],zmm3[35],zmm1[35],zmm3[36],zmm1[36],zmm3[37],zmm1[37],zmm3[38],zmm1[38],zmm3[39],zmm1[39],zmm3[48],zmm1[48],zmm3[49],zmm1[49],zmm3[50],zmm1[50],zmm3[51],zmm1[51],zmm3[52],zmm1[52],zmm3[53],zmm1[53],zmm3[54],zmm1[54],zmm3[55],zmm1[55]
-; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3 # [128,0,0,0,128,0,0,0,128,0,0,0,0,0,128,0,0,0,0,0,0,0,0,128,0,128,0,0,0,0,0,0]
+; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0]
; AVX512BW-NEXT: vpsrlw $8, %zmm3, %zmm3
; AVX512BW-NEXT: vpackuswb %zmm4, %zmm3, %zmm3
; AVX512BW-NEXT: vpaddb %zmm2, %zmm3, %zmm2
diff --git a/llvm/test/CodeGen/X86/vector-mul.ll b/llvm/test/CodeGen/X86/vector-mul.ll
index 6e1bf25..d0bb90c 100644
--- a/llvm/test/CodeGen/X86/vector-mul.ll
+++ b/llvm/test/CodeGen/X86/vector-mul.ll
@@ -130,31 +130,31 @@ define <4 x i32> @mul_v4i32_1_2_4_8(<4 x i32> %a0) nounwind {
; X86-SSE2-LABEL: mul_v4i32_1_2_4_8:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [1,2,4,8]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [2,u,8,u]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-SSE2-NEXT: retl
;
; X86-SSE4-LABEL: mul_v4i32_1_2_4_8:
; X86-SSE4: # %bb.0:
-; X86-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [1,2,4,8]
; X86-SSE4-NEXT: retl
;
; X64-SSE2-LABEL: mul_v4i32_1_2_4_8:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,2,4,8]
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2,u,8,u]
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X64-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-SSE2-NEXT: retq
;
; X64-SSE4-LABEL: mul_v4i32_1_2_4_8:
; X64-SSE4: # %bb.0:
-; X64-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,2,4,8]
; X64-SSE4-NEXT: retq
;
; X64-XOP-LABEL: mul_v4i32_1_2_4_8:
@@ -190,12 +190,12 @@ define <4 x i32> @mul_v4i32_1_2_4_8_optsize(<4 x i32> %a0) nounwind optsize {
;
; X86-SSE4-LABEL: mul_v4i32_1_2_4_8_optsize:
; X86-SSE4: # %bb.0:
-; X86-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [1,2,4,8]
; X86-SSE4-NEXT: retl
;
; X64-SSE4-LABEL: mul_v4i32_1_2_4_8_optsize:
; X64-SSE4: # %bb.0:
-; X64-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,2,4,8]
; X64-SSE4-NEXT: retq
;
; X64-XOP-LABEL: mul_v4i32_1_2_4_8_optsize:
@@ -989,7 +989,7 @@ define <2 x i64> @mul_v2i64_17_65(<2 x i64> %a0) nounwind {
;
; X64-AVX512DQ-LABEL: mul_v2i64_17_65:
; X64-AVX512DQ: # %bb.0:
-; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [17,65]
; X64-AVX512DQ-NEXT: retq
%1 = mul <2 x i64> %a0, <i64 17, i64 65>
ret <2 x i64> %1
@@ -999,36 +999,36 @@ define <4 x i32> @mul_v4i32_5_17_33_65(<4 x i32> %a0) nounwind {
; X86-SSE2-LABEL: mul_v4i32_5_17_33_65:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [5,17,33,65]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [17,u,65,u]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-SSE2-NEXT: retl
;
; X86-SSE4-LABEL: mul_v4i32_5_17_33_65:
; X86-SSE4: # %bb.0:
-; X86-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [5,17,33,65]
; X86-SSE4-NEXT: retl
;
; X64-SSE2-LABEL: mul_v4i32_5_17_33_65:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [5,17,33,65]
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [17,u,65,u]
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X64-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-SSE2-NEXT: retq
;
; X64-SSE4-LABEL: mul_v4i32_5_17_33_65:
; X64-SSE4: # %bb.0:
-; X64-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [5,17,33,65]
; X64-SSE4-NEXT: retq
;
; X64-AVX-LABEL: mul_v4i32_5_17_33_65:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [5,17,33,65]
; X64-AVX-NEXT: retq
%1 = mul <4 x i32> %a0, <i32 5, i32 17, i32 33, i32 65>
ret <4 x i32> %1
@@ -1384,7 +1384,7 @@ define <2 x i64> @mul_v2i64_15_63(<2 x i64> %a0) nounwind {
;
; X64-AVX512DQ-LABEL: mul_v2i64_15_63:
; X64-AVX512DQ: # %bb.0:
-; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [15,63]
; X64-AVX512DQ-NEXT: retq
%1 = mul <2 x i64> %a0, <i64 15, i64 63>
ret <2 x i64> %1
@@ -1427,7 +1427,7 @@ define <2 x i64> @mul_v2i64_neg_15_63(<2 x i64> %a0) nounwind {
; X64-SSE2-NEXT: movdqa %xmm0, %xmm3
; X64-SSE2-NEXT: psrlq $32, %xmm3
; X64-SSE2-NEXT: pmuludq %xmm1, %xmm3
-; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4294967295,4294967295]
; X64-SSE2-NEXT: paddq %xmm3, %xmm0
; X64-SSE2-NEXT: psllq $32, %xmm0
; X64-SSE2-NEXT: paddq %xmm2, %xmm0
@@ -1441,7 +1441,7 @@ define <2 x i64> @mul_v2i64_neg_15_63(<2 x i64> %a0) nounwind {
; X64-SSE4-NEXT: movdqa %xmm0, %xmm3
; X64-SSE4-NEXT: psrlq $32, %xmm3
; X64-SSE4-NEXT: pmuludq %xmm1, %xmm3
-; X64-SSE4-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE4-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4294967295,4294967295]
; X64-SSE4-NEXT: paddq %xmm3, %xmm0
; X64-SSE4-NEXT: psllq $32, %xmm0
; X64-SSE4-NEXT: paddq %xmm2, %xmm0
@@ -1453,7 +1453,7 @@ define <2 x i64> @mul_v2i64_neg_15_63(<2 x i64> %a0) nounwind {
; X64-XOP-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; X64-XOP-NEXT: vpsrlq $32, %xmm0, %xmm3
; X64-XOP-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
-; X64-XOP-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-XOP-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [4294967295,4294967295]
; X64-XOP-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; X64-XOP-NEXT: vpsllq $32, %xmm0, %xmm0
; X64-XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
@@ -1465,7 +1465,7 @@ define <2 x i64> @mul_v2i64_neg_15_63(<2 x i64> %a0) nounwind {
; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; X64-AVX2-NEXT: vpsrlq $32, %xmm0, %xmm3
; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
-; X64-AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [4294967295,4294967295]
; X64-AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vpsllq $32, %xmm0, %xmm0
; X64-AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0
@@ -1473,7 +1473,7 @@ define <2 x i64> @mul_v2i64_neg_15_63(<2 x i64> %a0) nounwind {
;
; X64-AVX512DQ-LABEL: mul_v2i64_neg_15_63:
; X64-AVX512DQ: # %bb.0:
-; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [18446744073709551601,18446744073709551553]
; X64-AVX512DQ-NEXT: retq
%1 = mul <2 x i64> %a0, <i64 -15, i64 -63>
ret <2 x i64> %1
@@ -1516,7 +1516,7 @@ define <2 x i64> @mul_v2i64_neg_17_65(<2 x i64> %a0) nounwind {
; X64-SSE2-NEXT: movdqa %xmm0, %xmm3
; X64-SSE2-NEXT: psrlq $32, %xmm3
; X64-SSE2-NEXT: pmuludq %xmm1, %xmm3
-; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4294967295,4294967295]
; X64-SSE2-NEXT: paddq %xmm3, %xmm0
; X64-SSE2-NEXT: psllq $32, %xmm0
; X64-SSE2-NEXT: paddq %xmm2, %xmm0
@@ -1530,7 +1530,7 @@ define <2 x i64> @mul_v2i64_neg_17_65(<2 x i64> %a0) nounwind {
; X64-SSE4-NEXT: movdqa %xmm0, %xmm3
; X64-SSE4-NEXT: psrlq $32, %xmm3
; X64-SSE4-NEXT: pmuludq %xmm1, %xmm3
-; X64-SSE4-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE4-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4294967295,4294967295]
; X64-SSE4-NEXT: paddq %xmm3, %xmm0
; X64-SSE4-NEXT: psllq $32, %xmm0
; X64-SSE4-NEXT: paddq %xmm2, %xmm0
@@ -1542,7 +1542,7 @@ define <2 x i64> @mul_v2i64_neg_17_65(<2 x i64> %a0) nounwind {
; X64-XOP-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; X64-XOP-NEXT: vpsrlq $32, %xmm0, %xmm3
; X64-XOP-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
-; X64-XOP-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-XOP-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [4294967295,4294967295]
; X64-XOP-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; X64-XOP-NEXT: vpsllq $32, %xmm0, %xmm0
; X64-XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
@@ -1554,7 +1554,7 @@ define <2 x i64> @mul_v2i64_neg_17_65(<2 x i64> %a0) nounwind {
; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; X64-AVX2-NEXT: vpsrlq $32, %xmm0, %xmm3
; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
-; X64-AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [4294967295,4294967295]
; X64-AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vpsllq $32, %xmm0, %xmm0
; X64-AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0
@@ -1562,7 +1562,7 @@ define <2 x i64> @mul_v2i64_neg_17_65(<2 x i64> %a0) nounwind {
;
; X64-AVX512DQ-LABEL: mul_v2i64_neg_17_65:
; X64-AVX512DQ: # %bb.0:
-; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [18446744073709551599,18446744073709551551]
; X64-AVX512DQ-NEXT: retq
%1 = mul <2 x i64> %a0, <i64 -17, i64 -65>
ret <2 x i64> %1
@@ -1600,7 +1600,7 @@ define <2 x i64> @mul_v2i64_neg_0_1(<2 x i64> %a0) nounwind {
; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
; X86-SSE2-NEXT: psrlq $32, %xmm3
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm3
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [0,0,0,0,u,u,u,u,255,255,255,255,u,u,u,u]
; X86-SSE2-NEXT: paddq %xmm3, %xmm0
; X86-SSE2-NEXT: psllq $32, %xmm0
; X86-SSE2-NEXT: paddq %xmm2, %xmm0
@@ -1614,7 +1614,7 @@ define <2 x i64> @mul_v2i64_neg_0_1(<2 x i64> %a0) nounwind {
; X86-SSE4-NEXT: movdqa %xmm0, %xmm3
; X86-SSE4-NEXT: psrlq $32, %xmm3
; X86-SSE4-NEXT: pmuludq %xmm1, %xmm3
-; X86-SSE4-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE4-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [0,0,0,0,u,u,u,u,255,255,255,255,u,u,u,u]
; X86-SSE4-NEXT: paddq %xmm3, %xmm0
; X86-SSE4-NEXT: psllq $32, %xmm0
; X86-SSE4-NEXT: paddq %xmm2, %xmm0
@@ -1628,7 +1628,7 @@ define <2 x i64> @mul_v2i64_neg_0_1(<2 x i64> %a0) nounwind {
; X64-SSE2-NEXT: movdqa %xmm0, %xmm3
; X64-SSE2-NEXT: psrlq $32, %xmm3
; X64-SSE2-NEXT: pmuludq %xmm1, %xmm3
-; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,0,0,0,0,0,0,0,255,255,255,255,0,0,0,0]
; X64-SSE2-NEXT: paddq %xmm3, %xmm0
; X64-SSE2-NEXT: psllq $32, %xmm0
; X64-SSE2-NEXT: paddq %xmm2, %xmm0
@@ -1642,7 +1642,7 @@ define <2 x i64> @mul_v2i64_neg_0_1(<2 x i64> %a0) nounwind {
; X64-SSE4-NEXT: movdqa %xmm0, %xmm3
; X64-SSE4-NEXT: psrlq $32, %xmm3
; X64-SSE4-NEXT: pmuludq %xmm1, %xmm3
-; X64-SSE4-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE4-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,0,0,0,0,0,0,0,255,255,255,255,0,0,0,0]
; X64-SSE4-NEXT: paddq %xmm3, %xmm0
; X64-SSE4-NEXT: psllq $32, %xmm0
; X64-SSE4-NEXT: paddq %xmm2, %xmm0
@@ -1654,7 +1654,7 @@ define <2 x i64> @mul_v2i64_neg_0_1(<2 x i64> %a0) nounwind {
; X64-XOP-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; X64-XOP-NEXT: vpsrlq $32, %xmm0, %xmm3
; X64-XOP-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
-; X64-XOP-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-XOP-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,0,0,0,0,0,0,255,255,255,255,0,0,0,0]
; X64-XOP-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; X64-XOP-NEXT: vpsllq $32, %xmm0, %xmm0
; X64-XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
@@ -1666,7 +1666,7 @@ define <2 x i64> @mul_v2i64_neg_0_1(<2 x i64> %a0) nounwind {
; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; X64-AVX2-NEXT: vpsrlq $32, %xmm0, %xmm3
; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
-; X64-AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,0,0,0,0,0,0,255,255,255,255,0,0,0,0]
; X64-AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vpsllq $32, %xmm0, %xmm0
; X64-AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0
@@ -1674,7 +1674,7 @@ define <2 x i64> @mul_v2i64_neg_0_1(<2 x i64> %a0) nounwind {
;
; X64-AVX512DQ-LABEL: mul_v2i64_neg_0_1:
; X64-AVX512DQ: # %bb.0:
-; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255]
; X64-AVX512DQ-NEXT: retq
%1 = mul <2 x i64> %a0, <i64 0, i64 -1>
ret <2 x i64> %1
@@ -1689,7 +1689,7 @@ define <2 x i64> @mul_v2i64_15_neg_63(<2 x i64> %a0) nounwind {
; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
; X86-SSE2-NEXT: psrlq $32, %xmm3
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm3
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [0,0,0,0,u,u,u,u,255,255,255,255,u,u,u,u]
; X86-SSE2-NEXT: paddq %xmm3, %xmm0
; X86-SSE2-NEXT: psllq $32, %xmm0
; X86-SSE2-NEXT: paddq %xmm2, %xmm0
@@ -1703,7 +1703,7 @@ define <2 x i64> @mul_v2i64_15_neg_63(<2 x i64> %a0) nounwind {
; X86-SSE4-NEXT: movdqa %xmm0, %xmm3
; X86-SSE4-NEXT: psrlq $32, %xmm3
; X86-SSE4-NEXT: pmuludq %xmm1, %xmm3
-; X86-SSE4-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE4-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [0,0,0,0,u,u,u,u,255,255,255,255,u,u,u,u]
; X86-SSE4-NEXT: paddq %xmm3, %xmm0
; X86-SSE4-NEXT: psllq $32, %xmm0
; X86-SSE4-NEXT: paddq %xmm2, %xmm0
@@ -1717,7 +1717,7 @@ define <2 x i64> @mul_v2i64_15_neg_63(<2 x i64> %a0) nounwind {
; X64-SSE2-NEXT: movdqa %xmm0, %xmm3
; X64-SSE2-NEXT: psrlq $32, %xmm3
; X64-SSE2-NEXT: pmuludq %xmm1, %xmm3
-; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,0,0,0,0,0,0,0,255,255,255,255,0,0,0,0]
; X64-SSE2-NEXT: paddq %xmm3, %xmm0
; X64-SSE2-NEXT: psllq $32, %xmm0
; X64-SSE2-NEXT: paddq %xmm2, %xmm0
@@ -1731,7 +1731,7 @@ define <2 x i64> @mul_v2i64_15_neg_63(<2 x i64> %a0) nounwind {
; X64-SSE4-NEXT: movdqa %xmm0, %xmm3
; X64-SSE4-NEXT: psrlq $32, %xmm3
; X64-SSE4-NEXT: pmuludq %xmm1, %xmm3
-; X64-SSE4-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE4-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,0,0,0,0,0,0,0,255,255,255,255,0,0,0,0]
; X64-SSE4-NEXT: paddq %xmm3, %xmm0
; X64-SSE4-NEXT: psllq $32, %xmm0
; X64-SSE4-NEXT: paddq %xmm2, %xmm0
@@ -1743,7 +1743,7 @@ define <2 x i64> @mul_v2i64_15_neg_63(<2 x i64> %a0) nounwind {
; X64-XOP-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; X64-XOP-NEXT: vpsrlq $32, %xmm0, %xmm3
; X64-XOP-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
-; X64-XOP-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-XOP-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,0,0,0,0,0,0,255,255,255,255,0,0,0,0]
; X64-XOP-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; X64-XOP-NEXT: vpsllq $32, %xmm0, %xmm0
; X64-XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
@@ -1755,7 +1755,7 @@ define <2 x i64> @mul_v2i64_15_neg_63(<2 x i64> %a0) nounwind {
; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; X64-AVX2-NEXT: vpsrlq $32, %xmm0, %xmm3
; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
-; X64-AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,0,0,0,0,0,0,255,255,255,255,0,0,0,0]
; X64-AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vpsllq $32, %xmm0, %xmm0
; X64-AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0
@@ -1763,7 +1763,7 @@ define <2 x i64> @mul_v2i64_15_neg_63(<2 x i64> %a0) nounwind {
;
; X64-AVX512DQ-LABEL: mul_v2i64_15_neg_63:
; X64-AVX512DQ: # %bb.0:
-; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [15,18446744073709551553]
; X64-AVX512DQ-NEXT: retq
%1 = mul <2 x i64> %a0, <i64 15, i64 -63>
ret <2 x i64> %1
@@ -1773,36 +1773,36 @@ define <4 x i32> @mul_v4i32_0_15_31_7(<4 x i32> %a0) nounwind {
; X86-SSE2-LABEL: mul_v4i32_0_15_31_7:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [0,15,31,7]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [15,u,7,u]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-SSE2-NEXT: retl
;
; X86-SSE4-LABEL: mul_v4i32_0_15_31_7:
; X86-SSE4: # %bb.0:
-; X86-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [0,15,31,7]
; X86-SSE4-NEXT: retl
;
; X64-SSE2-LABEL: mul_v4i32_0_15_31_7:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,15,31,7]
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [15,u,7,u]
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X64-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-SSE2-NEXT: retq
;
; X64-SSE4-LABEL: mul_v4i32_0_15_31_7:
; X64-SSE4: # %bb.0:
-; X64-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,15,31,7]
; X64-SSE4-NEXT: retq
;
; X64-AVX-LABEL: mul_v4i32_0_15_31_7:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,15,31,7]
; X64-AVX-NEXT: retq
%1 = mul <4 x i32> %a0, <i32 0, i32 15, i32 31, i32 7>
ret <4 x i32> %1
@@ -1947,7 +1947,7 @@ define <2 x i64> @mul_v2i64_68_132(<2 x i64> %x) nounwind {
;
; X64-AVX512DQ-LABEL: mul_v2i64_68_132:
; X64-AVX512DQ: # %bb.0:
-; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [68,132]
; X64-AVX512DQ-NEXT: retq
%mul = mul <2 x i64> %x, <i64 68, i64 132>
ret <2 x i64> %mul
@@ -2009,7 +2009,7 @@ define <2 x i64> @mul_v2i64_60_120(<2 x i64> %x) nounwind {
;
; X64-AVX512DQ-LABEL: mul_v2i64_60_120:
; X64-AVX512DQ: # %bb.0:
-; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [60,124]
; X64-AVX512DQ-NEXT: retq
%mul = mul <2 x i64> %x, <i64 60, i64 124>
ret <2 x i64> %mul
diff --git a/llvm/test/CodeGen/X86/vector-rotate-128.ll b/llvm/test/CodeGen/X86/vector-rotate-128.ll
index 93f4ce7..0bf5a8d 100644
--- a/llvm/test/CodeGen/X86/vector-rotate-128.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-128.ll
@@ -1092,9 +1092,9 @@ define <4 x i32> @constant_rotate_v4i32(<4 x i32> %a) nounwind {
; SSE2-LABEL: constant_rotate_v4i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,32,64,128]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [32,u,128,u]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1106,8 +1106,8 @@ define <4 x i32> @constant_rotate_v4i32(<4 x i32> %a) nounwind {
; SSE41-LABEL: constant_rotate_v4i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [32,u,128,u]
+; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,32,64,128]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1118,8 +1118,8 @@ define <4 x i32> @constant_rotate_v4i32(<4 x i32> %a) nounwind {
; AVX1-LABEL: constant_rotate_v4i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [32,u,128,u]
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [16,32,64,128]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1156,9 +1156,9 @@ define <4 x i32> @constant_rotate_v4i32(<4 x i32> %a) nounwind {
; X86-SSE2-LABEL: constant_rotate_v4i32:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [16,32,64,128]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [32,u,128,u]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
diff --git a/llvm/test/CodeGen/X86/vector-rotate-256.ll b/llvm/test/CodeGen/X86/vector-rotate-256.ll
index 64c3118..5ae3e2f 100644
--- a/llvm/test/CodeGen/X86/vector-rotate-256.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-256.ll
@@ -895,13 +895,13 @@ define <8 x i32> @constant_rotate_v8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: constant_rotate_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [32,u,128,u]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [512,u,2048,u]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [16,32,64,128]
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [256,512,1024,2048]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vmovshdup {{.*#+}} ymm2 = ymm0[1,1,3,3,5,5,7,7]
; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll
index 99dac74..3085c32 100644
--- a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll
@@ -987,21 +987,21 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
; SSE2-LABEL: constant_shift_v4i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,32,64,128]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [32,u,128,u]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_shift_v4i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,32,64,128]
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_shift_v4i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [16,32,64,128]
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v4i32:
@@ -1032,9 +1032,9 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
; X86-SSE-LABEL: constant_shift_v4i32:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X86-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [16,32,64,128]
; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X86-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [32,u,128,u]
; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-SSE-NEXT: retl
diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-256.ll b/llvm/test/CodeGen/X86/vector-shift-shl-256.ll
index b56a8b5..f9ccd1e 100644
--- a/llvm/test/CodeGen/X86/vector-shift-shl-256.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-shl-256.ll
@@ -1117,9 +1117,9 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: constant_shift_v8i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [16,32,64,128]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [256,512,256,128]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
@@ -1153,9 +1153,9 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
;
; X86-AVX1-LABEL: constant_shift_v8i32:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1
+; X86-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1 # [16,32,64,128]
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; X86-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # [256,512,256,128]
; X86-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X86-AVX1-NEXT: retl
;
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
index 0e20b18..18d79b6 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
@@ -778,9 +778,9 @@ define <16 x i8> @combine_shl_pshufb(<4 x i32> %a0) {
; SSSE3-LABEL: combine_shl_pshufb:
; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSSE3-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSSE3-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,256,65536,65536]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSSE3-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSSE3-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [256,u,65536,u]
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1,2,3,0,5,6,7,4,9,10,11,8,12,13,14,15]
@@ -788,13 +788,13 @@ define <16 x i8> @combine_shl_pshufb(<4 x i32> %a0) {
;
; SSE41-LABEL: combine_shl_pshufb:
; SSE41: # %bb.0:
-; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,256,65536,65536]
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1,2,3,0,5,6,7,4,9,10,11,8,12,13,14,15]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_shl_pshufb:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,256,65536,65536]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,2,3,0,5,6,7,4,9,10,11,8,12,13,14,15]
; AVX1-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-trunc-math.ll b/llvm/test/CodeGen/X86/vector-trunc-math.ll
index 1af7542..4235377 100644
--- a/llvm/test/CodeGen/X86/vector-trunc-math.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc-math.ll
@@ -2110,7 +2110,7 @@ define <4 x i32> @trunc_mul_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm2, %xmm2
; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
-; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2,3]
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2]
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: retq
@@ -2119,7 +2119,7 @@ define <4 x i32> @trunc_mul_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,1,2,3]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -2127,7 +2127,7 @@ define <4 x i32> @trunc_mul_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; AVX2-SLOW-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,1,2,3]
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -2135,7 +2135,7 @@ define <4 x i32> @trunc_mul_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; AVX2-FAST-ALL: # %bb.0:
; AVX2-FAST-ALL-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,2,4,6,0,0,0,0]
; AVX2-FAST-ALL-NEXT: vpermd %ymm0, %ymm1, %ymm0
-; AVX2-FAST-ALL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-FAST-ALL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,1,2,3]
; AVX2-FAST-ALL-NEXT: vzeroupper
; AVX2-FAST-ALL-NEXT: retq
;
@@ -2143,7 +2143,7 @@ define <4 x i32> @trunc_mul_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; AVX2-FAST-PERLANE: # %bb.0:
; AVX2-FAST-PERLANE-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; AVX2-FAST-PERLANE-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-FAST-PERLANE-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,1,2,3]
; AVX2-FAST-PERLANE-NEXT: vzeroupper
; AVX2-FAST-PERLANE-NEXT: retq
;
@@ -2151,7 +2151,7 @@ define <4 x i32> @trunc_mul_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,1,2,3]
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = mul <4 x i64> %a0, <i64 0, i64 1, i64 2, i64 3>
@@ -2253,13 +2253,13 @@ define <8 x i16> @trunc_mul_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
define <16 x i8> @trunc_mul_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; SSE-LABEL: trunc_mul_const_v16i64_v16i8:
; SSE: # %bb.0:
-; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
-; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
-; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
-; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5
-; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6
-; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm7
+; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2,3]
+; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [4,5]
+; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [6,7]
+; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 # [8,9]
+; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5 # [10,11]
+; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6 # [12,13]
+; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm7 # [14,15]
; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
; SSE-NEXT: pand %xmm8, %xmm7
; SSE-NEXT: pand %xmm8, %xmm6
@@ -2280,18 +2280,18 @@ define <16 x i8> @trunc_mul_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
;
; AVX1-LABEL: trunc_mul_const_v16i64_v16i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm4
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm4 # [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm5
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2,3]
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm5 # [4,5]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm6
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [6,7]
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm6 # [8,9]
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm7
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [10,11]
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm7 # [12,13]
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [14,15]
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm8 = [255,255]
; AVX1-NEXT: vpand %xmm3, %xmm8, %xmm3
; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm7
@@ -2313,10 +2313,10 @@ define <16 x i8> @trunc_mul_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
;
; AVX2-LABEL: trunc_mul_const_v16i64_v16i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [0,1,2,3]
+; AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 # [4,5,6,7]
+; AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [8,9,10,11]
+; AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [12,13,14,15]
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm4 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpand %ymm4, %ymm2, %ymm2
@@ -2335,8 +2335,8 @@ define <16 x i8> @trunc_mul_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
;
; AVX512F-LABEL: trunc_mul_const_v16i64_v16i8:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
-; AVX512F-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
+; AVX512F-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 # [0,1,2,3,4,5,6,7]
+; AVX512F-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 # [8,9,10,11,12,13,14,15]
; AVX512F-NEXT: vpmovqb %zmm1, %xmm1
; AVX512F-NEXT: vpmovqb %zmm0, %xmm0
; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -2345,8 +2345,8 @@ define <16 x i8> @trunc_mul_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
;
; AVX512BW-LABEL: trunc_mul_const_v16i64_v16i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
-; AVX512BW-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 # [0,1,2,3,4,5,6,7]
+; AVX512BW-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 # [8,9,10,11,12,13,14,15]
; AVX512BW-NEXT: vpmovqb %zmm1, %xmm1
; AVX512BW-NEXT: vpmovqb %zmm0, %xmm0
; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -2355,8 +2355,8 @@ define <16 x i8> @trunc_mul_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
;
; AVX512DQ-LABEL: trunc_mul_const_v16i64_v16i8:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
-; AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
+; AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 # [0,1,2,3,4,5,6,7]
+; AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 # [8,9,10,11,12,13,14,15]
; AVX512DQ-NEXT: vpmovqb %zmm1, %xmm1
; AVX512DQ-NEXT: vpmovqb %zmm0, %xmm0
; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -2371,27 +2371,27 @@ define <16 x i8> @trunc_mul_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; SSE-LABEL: trunc_mul_const_v16i32_v16i8:
; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,1,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
+; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 # [1,u,3,u]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
-; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
+; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 # [5,u,7,u]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
-; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [8,9,10,11]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
+; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 # [9,u,11,u]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
-; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
+; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [12,13,14,15]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
-; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
+; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 # [13,u,15,u]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
@@ -2406,12 +2406,12 @@ define <16 x i8> @trunc_mul_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
;
; AVX1-LABEL: trunc_mul_const_v16i32_v16i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
+; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2 # [0,1,2,3]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm3
+; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [4,5,6,7]
+; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm3 # [8,9,10,11]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [12,13,14,15]
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [255,255,255,255]
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
@@ -2425,8 +2425,8 @@ define <16 x i8> @trunc_mul_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
;
; AVX2-LABEL: trunc_mul_const_v16i32_v16i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [0,1,2,3,4,5,6,7]
+; AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 # [8,9,10,11,12,13,14,15]
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -2439,7 +2439,7 @@ define <16 x i8> @trunc_mul_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
;
; AVX512-LABEL: trunc_mul_const_v16i32_v16i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 # [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/vselect-avx.ll b/llvm/test/CodeGen/X86/vselect-avx.ll
index 17315c4..ac330a7 100644
--- a/llvm/test/CodeGen/X86/vselect-avx.ll
+++ b/llvm/test/CodeGen/X86/vselect-avx.ll
@@ -95,7 +95,7 @@ bb:
define void @test3(<4 x i32> %induction30, ptr %tmp16, ptr %tmp17, <4 x i16> %tmp3, <4 x i16> %tmp12) {
; AVX1-LABEL: test3:
; AVX1: ## %bb.0:
-; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ## [2863311531,2863311531,2863311531,2863311531]
; AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm3
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
@@ -151,23 +151,19 @@ define <32 x i8> @PR22706(<32 x i1> %x) {
; AVX1: ## %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsllw $7, %xmm1, %xmm1
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpcmpgtb %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
-; AVX1-NEXT: vpaddb %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
+; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpcmpgtb %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpaddb %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpaddb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: PR22706:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllw $7, %ymm0, %ymm0
-; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpaddb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
diff --git a/llvm/test/CodeGen/X86/vselect-pcmp.ll b/llvm/test/CodeGen/X86/vselect-pcmp.ll
index 8543e9f..16700d4 100644
--- a/llvm/test/CodeGen/X86/vselect-pcmp.ll
+++ b/llvm/test/CodeGen/X86/vselect-pcmp.ll
@@ -1046,7 +1046,7 @@ define <2 x i64> @blend_mask_cond_v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %z
define <4 x i32> @blend_mask_cond_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %z) {
; AVX1-LABEL: blend_mask_cond_v4i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [32768,4194304,1073741824,2147483648]
; AVX1-NEXT: vblendvps %xmm0, %xmm2, %xmm1, %xmm0
; AVX1-NEXT: retq
;
@@ -1211,9 +1211,9 @@ define <4 x i64> @blend_mask_cond_v4i64(<4 x i64> %x, <4 x i64> %y, <4 x i64> %z
define <8 x i32> @blend_mask_cond_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %z) {
; AVX1-LABEL: blend_mask_cond_v8i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm3
+; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm3 # [2147483648,1073741824,268435456,536870912]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [268435456,2097152,1073741824,524288]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
; AVX1-NEXT: vblendvps %ymm0, %ymm2, %ymm1, %ymm0
; AVX1-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/zero-call-used-regs-simd.ll b/llvm/test/CodeGen/X86/zero-call-used-regs-simd.ll
new file mode 100644
index 0000000..d9253e0
--- /dev/null
+++ b/llvm/test/CodeGen/X86/zero-call-used-regs-simd.ll
@@ -0,0 +1,216 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 -verify-machineinstrs | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx -verify-machineinstrs | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 -verify-machineinstrs | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512f,+avx512vl -verify-machineinstrs | FileCheck %s --check-prefixes=AVX512,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512f,+avx512vl,+avx512bw -verify-machineinstrs | FileCheck %s --check-prefixes=AVX512,AVX512BW
+
+define void @zero_xmm(<4 x i32> %arg) #0 {
+; SSE-LABEL: zero_xmm:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps %xmm0, 0
+; SSE-NEXT: xorps %xmm0, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: zero_xmm:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovaps %xmm0, 0
+; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: zero_xmm:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vmovaps %xmm0, 0
+; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: retq
+ store <4 x i32> %arg, ptr null, align 32
+ ret void
+}
+
+define void @zero_ymm(<8 x i32> %arg) #0 {
+; SSE-LABEL: zero_ymm:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps %xmm1, 16
+; SSE-NEXT: movaps %xmm0, 0
+; SSE-NEXT: xorps %xmm0, %xmm0
+; SSE-NEXT: xorps %xmm1, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: zero_ymm:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovaps %ymm0, 0
+; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: zero_ymm:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vmovaps %ymm0, 0
+; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ store <8 x i32> %arg, ptr null, align 32
+ ret void
+}
+
+define void @zero_zmm(<16 x i32> %arg) #0 {
+; SSE-LABEL: zero_zmm:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps %xmm3, 48
+; SSE-NEXT: movaps %xmm2, 32
+; SSE-NEXT: movaps %xmm1, 16
+; SSE-NEXT: movaps %xmm0, 0
+; SSE-NEXT: xorps %xmm0, %xmm0
+; SSE-NEXT: xorps %xmm1, %xmm1
+; SSE-NEXT: xorps %xmm2, %xmm2
+; SSE-NEXT: xorps %xmm3, %xmm3
+; SSE-NEXT: retq
+;
+; AVX-LABEL: zero_zmm:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovaps %ymm1, 32
+; AVX-NEXT: vmovaps %ymm0, 0
+; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: zero_zmm:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vmovups %zmm0, 0
+; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ store <16 x i32> %arg, ptr null, align 32
+ ret void
+}
+
+define void @zero_k(<8 x i32> %arg, <8 x i1> %mask) #0 {
+; SSE-LABEL: zero_k:
+; SSE: # %bb.0:
+; SSE-NEXT: psllw $15, %xmm2
+; SSE-NEXT: packsswb %xmm2, %xmm2
+; SSE-NEXT: pmovmskb %xmm2, %eax
+; SSE-NEXT: testb $1, %al
+; SSE-NEXT: jne .LBB3_1
+; SSE-NEXT: # %bb.2: # %else
+; SSE-NEXT: testb $2, %al
+; SSE-NEXT: jne .LBB3_3
+; SSE-NEXT: .LBB3_4: # %else2
+; SSE-NEXT: testb $4, %al
+; SSE-NEXT: jne .LBB3_5
+; SSE-NEXT: .LBB3_6: # %else4
+; SSE-NEXT: testb $8, %al
+; SSE-NEXT: jne .LBB3_7
+; SSE-NEXT: .LBB3_8: # %else6
+; SSE-NEXT: testb $16, %al
+; SSE-NEXT: jne .LBB3_9
+; SSE-NEXT: .LBB3_10: # %else8
+; SSE-NEXT: testb $32, %al
+; SSE-NEXT: jne .LBB3_11
+; SSE-NEXT: .LBB3_12: # %else10
+; SSE-NEXT: testb $64, %al
+; SSE-NEXT: jne .LBB3_13
+; SSE-NEXT: .LBB3_14: # %else12
+; SSE-NEXT: testb $-128, %al
+; SSE-NEXT: je .LBB3_16
+; SSE-NEXT: .LBB3_15: # %cond.store13
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,3,3,3]
+; SSE-NEXT: movd %xmm0, 28
+; SSE-NEXT: .LBB3_16: # %else14
+; SSE-NEXT: xorl %eax, %eax
+; SSE-NEXT: pxor %xmm0, %xmm0
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: retq
+; SSE-NEXT: .LBB3_1: # %cond.store
+; SSE-NEXT: movd %xmm0, 0
+; SSE-NEXT: testb $2, %al
+; SSE-NEXT: je .LBB3_4
+; SSE-NEXT: .LBB3_3: # %cond.store1
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
+; SSE-NEXT: movd %xmm2, 4
+; SSE-NEXT: testb $4, %al
+; SSE-NEXT: je .LBB3_6
+; SSE-NEXT: .LBB3_5: # %cond.store3
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; SSE-NEXT: movd %xmm2, 8
+; SSE-NEXT: testb $8, %al
+; SSE-NEXT: je .LBB3_8
+; SSE-NEXT: .LBB3_7: # %cond.store5
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; SSE-NEXT: movd %xmm0, 12
+; SSE-NEXT: testb $16, %al
+; SSE-NEXT: je .LBB3_10
+; SSE-NEXT: .LBB3_9: # %cond.store7
+; SSE-NEXT: movd %xmm1, 16
+; SSE-NEXT: testb $32, %al
+; SSE-NEXT: je .LBB3_12
+; SSE-NEXT: .LBB3_11: # %cond.store9
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
+; SSE-NEXT: movd %xmm0, 20
+; SSE-NEXT: testb $64, %al
+; SSE-NEXT: je .LBB3_14
+; SSE-NEXT: .LBB3_13: # %cond.store11
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE-NEXT: movd %xmm0, 24
+; SSE-NEXT: testb $-128, %al
+; SSE-NEXT: jne .LBB3_15
+; SSE-NEXT: jmp .LBB3_16
+;
+; AVX1-LABEL: zero_k:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX1-NEXT: vpslld $31, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vmaskmovps %ymm0, %ymm1, 0
+; AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: zero_k:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX2-NEXT: vpmaskmovd %ymm0, %ymm1, 0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512VL-LABEL: zero_k:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpmovsxwd %xmm1, %ymm1
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k1
+; AVX512VL-NEXT: vmovdqa32 %ymm0, 0 {%k1}
+; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: kxorw %k0, %k0, %k1
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: zero_k:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsllw $15, %xmm1, %xmm1
+; AVX512BW-NEXT: vpmovw2m %xmm1, %k1
+; AVX512BW-NEXT: vmovdqa32 %ymm0, 0 {%k1}
+; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; AVX512BW-NEXT: kxorq %k0, %k0, %k1
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+ tail call void @llvm.masked.store.v8i32.p0(<8 x i32> %arg, ptr null, i32 32, <8 x i1> %mask)
+ ret void
+}
+
+attributes #0 = { "zero-call-used-regs"="used" }
diff --git a/llvm/test/DebugInfo/Generic/compileunit-source-language-name.ll b/llvm/test/DebugInfo/Generic/compileunit-source-language-name.ll
index e2b6167..c8cc871 100644
--- a/llvm/test/DebugInfo/Generic/compileunit-source-language-name.ll
+++ b/llvm/test/DebugInfo/Generic/compileunit-source-language-name.ll
@@ -1,6 +1,10 @@
; RUN: %llc_dwarf -filetype=obj -O0 < %s | llvm-dwarfdump -debug-info - | FileCheck %s --implicit-check-not "DW_AT_language"
-; CHECK: DW_AT_language_name (DW_LNAME_ObjC_plus_plus)
+; CHECK: DW_AT_language_name (DW_LNAME_ObjC_plus_plus)
+; CHECK: DW_AT_language_name (DW_LNAME_C_plus_plus)
+; CHECK: DW_AT_language_version (201100)
+; CHECK: DW_AT_language_name (DW_LNAME_Rust)
+; CHECK-NOT: DW_AT_language_version
@x = global i32 0, align 4, !dbg !0
@@ -9,7 +13,7 @@ define void @_Z4funcv() !dbg !8 {
ret void, !dbg !11
}
-!llvm.dbg.cu = !{!2}
+!llvm.dbg.cu = !{!2, !12, !13}
!llvm.module.flags = !{!6, !7}
!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
@@ -24,3 +28,5 @@ define void @_Z4funcv() !dbg !8 {
!9 = !DISubroutineType(types: !10)
!10 = !{null}
!11 = !DILocation(line: 2, column: 14, scope: !8)
+!12 = distinct !DICompileUnit(sourceLanguageName: DW_LNAME_C_plus_plus, sourceLanguageVersion: 201100, file: !3, producer: "handwritten", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, globals: !4, splitDebugInlining: false, nameTableKind: Apple, sysroot: "/")
+!13 = distinct !DICompileUnit(sourceLanguageName: DW_LNAME_Rust, sourceLanguageVersion: 0, file: !3, producer: "handwritten", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, globals: !4, splitDebugInlining: false, nameTableKind: Apple, sysroot: "/")
diff --git a/llvm/test/DebugInfo/X86/shrink-wrap-frame-setup-no-loc.mir b/llvm/test/DebugInfo/X86/shrink-wrap-frame-setup-no-loc.mir
new file mode 100644
index 0000000..b97e916
--- /dev/null
+++ b/llvm/test/DebugInfo/X86/shrink-wrap-frame-setup-no-loc.mir
@@ -0,0 +1,99 @@
+# RUN: %llc_dwarf %s -o - -mtriple=x86_64-unknown-unknown --start-after=livedebugvalues | FileCheck %s
+
+## Check that the line number from the ret above `.LBB0_2` doesn't leak onto the
+## frame setup instructions in the `.LBB0_2` block; `pushq %rax` should
+## explicitly be set to line zero.
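+##
+## For reference, a minimal assumed C++ reconstruction of the original test.c (not
+## part of this patch; names and exact line placement are hypothetical) whose line
+## table would match the CHECK lines below, with the loop condition on line 5:
+##
+##   #include <cstdio>
+##   void loop(unsigned long i) {   // line 4: scope of the DISubprogram
+##     while (i != 0) {             // line 5 (column 16 in the metadata): compare -> TEST64rr/JCC_1
+##       std::puts("tick");         // the call lowered in %for.body
+##       --i;
+##     }
+##   }                              // frame setup/destroy must not inherit line 5
+##   int main() { loop(3); }        // not needed by the test; keeps the sketch self-contained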
+
+# CHECK: loop:
+# CHECK-NEXT: .Lfunc_begin0:
+# CHECK-NEXT: .cfi_startproc
+# CHECK-NEXT: # %bb.0:
+# CHECK-NEXT: .file 1 "/" "test.c"
+# CHECK-NEXT: .loc 1 5 16 prologue_end # test.c:5:16
+# CHECK-NEXT: testq %rax, %rax
+# CHECK-NEXT: je .LBB0_2
+# CHECK-NEXT: # %bb.1:
+# CHECK-NEXT: .loc 1 5 16 # test.c:5:16
+# CHECK-NEXT: retq
+# CHECK-NEXT: .LBB0_2:
+# -- Check that the .loc below sets the current location to line 0.
+# CHECK-NEXT: .loc 1 0 16 is_stmt 0 # test.c:0:16
+# CHECK-NEXT: pushq %rax
+# CHECK-NEXT: .cfi_def_cfa_offset 16
+# CHECK-NEXT: addq $8, %rsp
+# CHECK-NEXT: .cfi_def_cfa_offset 8
+# CHECK-NEXT: .loc 1 5 16 is_stmt 1 # test.c:5:16
+# CHECK-NEXT: retq
+
+--- |
+ source_filename = "reduced.ll"
+ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+ target triple = "x86_64-unknown-unknown"
+
+ define void @loop(i64 %i) !dbg !4 {
+ entry:
+ %cmp.not = icmp eq i64 %i, 0, !dbg !7
+ br i1 %cmp.not, label %for.body, label %for.end
+
+ for.body: ; preds = %entry
+ %puts10 = tail call i32 null(ptr null)
+ %inc = add i64 0, 0
+ br label %for.end
+
+ for.end: ; preds = %for.body, %entry
+ ret void
+ }
+
+ !llvm.dbg.cu = !{!0}
+ !llvm.module.flags = !{!3}
+
+ !0 = distinct !DICompileUnit(language: DW_LANG_C11, file: !1, producer: "clang version 22.0.0git", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, globals: !2, splitDebugInlining: false, nameTableKind: None)
+ !1 = !DIFile(filename: "test.c", directory: "/")
+ !2 = !{}
+ !3 = !{i32 2, !"Debug Info Version", i32 3}
+ !4 = distinct !DISubprogram(name: "loop", scope: !1, file: !1, line: 4, type: !5, scopeLine: 4, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !2, keyInstructions: true)
+ !5 = !DISubroutineType(types: !6)
+ !6 = !{null}
+ !7 = !DILocation(line: 5, column: 16, scope: !8, atomGroup: 720, atomRank: 2)
+ !8 = distinct !DILexicalBlock(scope: !4, file: !1, line: 5, column: 9)
+...
+---
+name: loop
+alignment: 16
+tracksRegLiveness: true
+noPhis: true
+isSSA: false
+noVRegs: true
+hasFakeUses: false
+debugInstrRef: true
+tracksDebugUserValues: true
+liveins:
+ - { reg: '$rdi' }
+frameInfo:
+ stackSize: 8
+ offsetAdjustment: -8
+ maxAlignment: 1
+ adjustsStack: true
+ hasCalls: true
+ maxCallFrameSize: 0
+ isCalleeSavedInfoValid: true
+machineFunctionInfo:
+ amxProgModel: None
+body: |
+ bb.0:
+ successors: %bb.1(0x30000000), %bb.2(0x50000000)
+ liveins: $rdi
+
+ TEST64rr undef renamable $rax, undef renamable $rax, implicit-def $eflags, debug-location !7
+ JCC_1 %bb.1, 4, implicit $eflags
+
+ bb.2:
+ RET64 debug-location !7
+
+ bb.1:
+ frame-setup PUSH64r undef $rax, implicit-def $rsp, implicit $rsp
+ frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ $rsp = frame-destroy ADD64ri32 $rsp, 8, implicit-def dead $eflags
+ frame-destroy CFI_INSTRUCTION def_cfa_offset 8
+ RET64 debug-location !7
+...
diff --git a/llvm/test/Instrumentation/AddressSanitizer/asan-win-dont-instrument-catchpad.ll b/llvm/test/Instrumentation/AddressSanitizer/asan-win-dont-instrument-catchpad.ll
new file mode 100644
index 0000000..e38da0b
--- /dev/null
+++ b/llvm/test/Instrumentation/AddressSanitizer/asan-win-dont-instrument-catchpad.ll
@@ -0,0 +1,63 @@
+; RUN: opt < %s -passes=asan -S | FileCheck %s
+; CHECK: %ex = alloca i32, align 4
+; CHECK: catchpad within %{{.*}} [ptr @"??_R0H@8", i32 0, ptr %ex]
+
+; This test ensures that catch parameters are not instrumented on Windows.
+
+; This file was generated using the following source
+;
+; ```C++
+; #include <exception>
+; #include <cstdio>
+;
+; int main() {
+; try {
+; throw 1;
+; } catch (const int ex) {
+; printf("%d\n", ex);
+; return -1;
+; }
+; return 0;
+; }
+;
+; ```
+; then running the following sequence of commands
+;
+; ```
+; clang.exe -g0 -O0 -emit-llvm -c main.cpp -o main.bc
+; llvm-extract.exe -func=main main.bc -o main_func.bc
+; llvm-dis.exe main_func.bc -o main_func_dis.ll
+; ```
+; and finally manually trimming the resulting `.ll` file to remove
+; unnecessary metadata and manually adding the `sanitize_address` attribute,
+; which is needed for the ASan pass to run.
+
+target triple = "x86_64-pc-windows-msvc"
+
+@"??_R0H@8" = external global ptr
+
+; Function Attrs: sanitize_address
+define i32 @main() sanitize_address personality ptr @__CxxFrameHandler3 {
+entry:
+ %ex = alloca i32, align 4
+ invoke void @throw()
+ to label %unreachable unwind label %catch.dispatch
+
+catch.dispatch: ; preds = %entry
+ %0 = catchswitch within none [label %catch] unwind to caller
+
+catch: ; preds = %catch.dispatch
+ %1 = catchpad within %0 [ptr @"??_R0H@8", i32 0, ptr %ex]
+ call void @opaque() [ "funclet"(token %1) ]
+ catchret from %1 to label %return
+
+return: ; preds = %catch
+ ret i32 0
+
+unreachable: ; preds = %entry
+ unreachable
+}
+
+declare void @throw() noreturn
+declare void @opaque()
+declare i32 @__CxxFrameHandler3(...)
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vds_alias.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vds_alias.s
index 5b6bb47..83313a2 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vds_alias.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vds_alias.s
@@ -5,3 +5,15 @@ ds_load_tr_b64 v[2:3], v0
ds_load_tr_b128 v[2:5], v0
// GFX1250: ds_load_tr16_b128 v[2:5], v0 ; encoding: [0x00,0x00,0xf0,0xdb,0x00,0x00,0x00,0x02]
+
+ds_load_b128_tr_b16 v[2:5], v0
+// GFX1250: ds_load_tr16_b128 v[2:5], v0 ; encoding: [0x00,0x00,0xf0,0xdb,0x00,0x00,0x00,0x02]
+
+ds_load_b64_tr_b8 v[2:3], v0
+// GFX1250: ds_load_tr8_b64 v[2:3], v0 ; encoding: [0x00,0x00,0xf4,0xdb,0x00,0x00,0x00,0x02]
+
+ds_load_b64_tr_b4 v[2:3], v0
+// GFX1250: ds_load_tr4_b64 v[2:3], v0 ; encoding: [0x00,0x00,0xe8,0xdb,0x00,0x00,0x00,0x02]
+
+ds_load_tr6_b96 v[2:4], v0
+// GFX1250: ds_load_tr6_b96 v[2:4], v0 ; encoding: [0x00,0x00,0xec,0xdb,0x00,0x00,0x00,0x02]
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vflat_alias.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vflat_alias.s
index 6b2dd67..f983bc0 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vflat_alias.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vflat_alias.s
@@ -35,3 +35,78 @@ global_load_tr_b128 v[2:5], v[6:7], off offset:64
global_load_tr_b128 v[2:5], v[6:7], off offset:-64
// GFX1250: global_load_tr16_b128 v[2:5], v[6:7], off offset:-64 ; encoding: [0x7c,0xc0,0x15,0xee,0x02,0x00,0x00,0x00,0x06,0xc0,0xff,0xff]
+
+global_load_b64_tr_b8 v[2:3], v0, s[0:1]
+// GFX1250: global_load_tr8_b64 v[2:3], v0, s[0:1] ; encoding: [0x00,0x00,0x16,0xee,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
+
+global_load_b64_tr_b8 v[2:3], v0, s[0:1] offset:64
+// GFX1250: global_load_tr8_b64 v[2:3], v0, s[0:1] offset:64 ; encoding: [0x00,0x00,0x16,0xee,0x02,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_b64_tr_b8 v[2:3], v0, s[0:1] offset:-64
+// GFX1250: global_load_tr8_b64 v[2:3], v0, s[0:1] offset:-64 ; encoding: [0x00,0x00,0x16,0xee,0x02,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_b64_tr_b8 v[2:3], v[4:5], off
+// GFX1250: global_load_tr8_b64 v[2:3], v[4:5], off ; encoding: [0x7c,0x00,0x16,0xee,0x02,0x00,0x00,0x00,0x04,0x00,0x00,0x00]
+
+global_load_b64_tr_b8 v[2:3], v[4:5], off offset:64
+// GFX1250: global_load_tr8_b64 v[2:3], v[4:5], off offset:64 ; encoding: [0x7c,0x00,0x16,0xee,0x02,0x00,0x00,0x00,0x04,0x40,0x00,0x00]
+
+global_load_b64_tr_b8 v[2:3], v[4:5], off offset:-64
+// GFX1250: global_load_tr8_b64 v[2:3], v[4:5], off offset:-64 ; encoding: [0x7c,0x00,0x16,0xee,0x02,0x00,0x00,0x00,0x04,0xc0,0xff,0xff]
+
+global_load_b128_tr_b16 v[2:5], v0, s[0:1]
+// GFX1250: global_load_tr16_b128 v[2:5], v0, s[0:1] ; encoding: [0x00,0xc0,0x15,0xee,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
+
+global_load_b128_tr_b16 v[2:5], v0, s[0:1] offset:64
+// GFX1250: global_load_tr16_b128 v[2:5], v0, s[0:1] offset:64 ; encoding: [0x00,0xc0,0x15,0xee,0x02,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_b128_tr_b16 v[2:5], v0, s[0:1] offset:-64
+// GFX1250: global_load_tr16_b128 v[2:5], v0, s[0:1] offset:-64 ; encoding: [0x00,0xc0,0x15,0xee,0x02,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_b128_tr_b16 v[2:5], v[6:7], off
+// GFX1250: global_load_tr16_b128 v[2:5], v[6:7], off ; encoding: [0x7c,0xc0,0x15,0xee,0x02,0x00,0x00,0x00,0x06,0x00,0x00,0x00]
+
+global_load_b128_tr_b16 v[2:5], v[6:7], off offset:64
+// GFX1250: global_load_tr16_b128 v[2:5], v[6:7], off offset:64 ; encoding: [0x7c,0xc0,0x15,0xee,0x02,0x00,0x00,0x00,0x06,0x40,0x00,0x00]
+
+global_load_b128_tr_b16 v[2:5], v[6:7], off offset:-64
+// GFX1250: global_load_tr16_b128 v[2:5], v[6:7], off offset:-64 ; encoding: [0x7c,0xc0,0x15,0xee,0x02,0x00,0x00,0x00,0x06,0xc0,0xff,0xff]
+
+global_load_b64_tr_b4 v[2:3], v0, s[0:1]
+// GFX1250: global_load_tr4_b64 v[2:3], v0, s[0:1] ; encoding: [0x00,0xc0,0x1c,0xee,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
+
+global_load_b64_tr_b4 v[2:3], v0, s[0:1] offset:64
+// GFX1250: global_load_tr4_b64 v[2:3], v0, s[0:1] offset:64 ; encoding: [0x00,0xc0,0x1c,0xee,0x02,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_b64_tr_b4 v[2:3], v0, s[0:1] offset:-64
+// GFX1250: global_load_tr4_b64 v[2:3], v0, s[0:1] offset:-64 ; encoding: [0x00,0xc0,0x1c,0xee,0x02,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_b64_tr_b4 v[2:3], v[4:5], off
+// GFX1250: global_load_tr4_b64 v[2:3], v[4:5], off ; encoding: [0x7c,0xc0,0x1c,0xee,0x02,0x00,0x00,0x00,0x04,0x00,0x00,0x00]
+
+global_load_b64_tr_b4 v[2:3], v[4:5], off offset:64
+// GFX1250: global_load_tr4_b64 v[2:3], v[4:5], off offset:64 ; encoding: [0x7c,0xc0,0x1c,0xee,0x02,0x00,0x00,0x00,0x04,0x40,0x00,0x00]
+
+global_load_b64_tr_b4 v[2:3], v[4:5], off offset:-64
+// GFX1250: global_load_tr4_b64 v[2:3], v[4:5], off offset:-64 ; encoding: [0x7c,0xc0,0x1c,0xee,0x02,0x00,0x00,0x00,0x04,0xc0,0xff,0xff]
+
+global_load_b96_tr_b6 v[2:4], v0, s[0:1]
+// GFX1250: global_load_tr6_b96 v[2:4], v0, s[0:1] ; encoding: [0x00,0x00,0x1d,0xee,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
+
+global_load_b96_tr_b6 v[3:5], v0, s[0:1]
+// GFX1250: global_load_tr6_b96 v[3:5], v0, s[0:1] ; encoding: [0x00,0x00,0x1d,0xee,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
+
+global_load_b96_tr_b6 v[2:4], v0, s[0:1] offset:64
+// GFX1250: global_load_tr6_b96 v[2:4], v0, s[0:1] offset:64 ; encoding: [0x00,0x00,0x1d,0xee,0x02,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_b96_tr_b6 v[2:4], v0, s[0:1] offset:-64
+// GFX1250: global_load_tr6_b96 v[2:4], v0, s[0:1] offset:-64 ; encoding: [0x00,0x00,0x1d,0xee,0x02,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_b96_tr_b6 v[2:4], v[6:7], off
+// GFX1250: global_load_tr6_b96 v[2:4], v[6:7], off ; encoding: [0x7c,0x00,0x1d,0xee,0x02,0x00,0x00,0x00,0x06,0x00,0x00,0x00]
+
+global_load_b96_tr_b6 v[2:4], v[6:7], off offset:64
+// GFX1250: global_load_tr6_b96 v[2:4], v[6:7], off offset:64 ; encoding: [0x7c,0x00,0x1d,0xee,0x02,0x00,0x00,0x00,0x06,0x40,0x00,0x00]
+
+global_load_b96_tr_b6 v[2:4], v[6:7], off offset:-64
+// GFX1250: global_load_tr6_b96 v[2:4], v[6:7], off offset:-64 ; encoding: [0x7c,0x00,0x1d,0xee,0x02,0x00,0x00,0x00,0x06,0xc0,0xff,0xff]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx8_vop3cx_nowarn.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx8_vop3cx_nowarn.txt
new file mode 100644
index 0000000..d4888ad
--- /dev/null
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx8_vop3cx_nowarn.txt
@@ -0,0 +1,422 @@
+# RUN: llvm-mc -triple=amdgcn -mcpu=tonga -disassemble -show-encoding < %s 2>&1 | FileCheck -strict-whitespace %s
+
+# In GFX10+, v_cmpx_* use EXEC as the implicit dst. The disassembler issues a warning when the dst
+# is not 0x7e (EXEC). In GFX9 and earlier, these instructions have an explicit dst. Therefore, such
+# warnings should not be issued.
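+#
+# For readability: in these VOP3 encodings the first byte carries the dst operand,
+# so the leading 0x0a below decodes as s[10:11], 0x6a as vcc, and 0x7e would be
+# EXEC (the only dst value GFX10+ accepts without a warning).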
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_class_f32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0x11,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0x11,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_class_f32_e64 flat_scratch, v1, v2 ; encoding: [0x66,0x00,0x11,0xd0,0x01,0x05,0x02,0x00]
+0x66,0x00,0x11,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_class_f32_e64 vcc, v1, v2 ; encoding: [0x6a,0x00,0x11,0xd0,0x01,0x05,0x02,0x00]
+0x6a,0x00,0x11,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_class_f32_e64 tba, v1, v2 ; encoding: [0x6c,0x00,0x11,0xd0,0x01,0x05,0x02,0x00]
+0x6c,0x00,0x11,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_class_f32_e64 tma, v1, v2 ; encoding: [0x6e,0x00,0x11,0xd0,0x01,0x05,0x02,0x00]
+0x6e,0x00,0x11,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_class_f32_e64 ttmp[10:11], v1, v2 ; encoding: [0x7a,0x00,0x11,0xd0,0x01,0x05,0x02,0x00]
+0x7a,0x00,0x11,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_class_f64_e64 s[0:1], v[1:2], v2 ; encoding: [0x00,0x00,0x13,0xd0,0x01,0x05,0x02,0x00]
+0x00,0x00,0x13,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_class_f16_e64 s[2:3], v1, v2 ; encoding: [0x02,0x00,0x15,0xd0,0x01,0x05,0x02,0x00]
+0x02,0x00,0x15,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_f16_e64 s[4:5], v1, v2 ; encoding: [0x04,0x00,0x30,0xd0,0x01,0x05,0x02,0x00]
+0x04,0x00,0x30,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_f16_e64 s[6:7], v1, v2 ; encoding: [0x06,0x00,0x31,0xd0,0x01,0x05,0x02,0x00]
+0x06,0x00,0x31,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_f16_e64 s[8:9], v1, v2 ; encoding: [0x08,0x00,0x32,0xd0,0x01,0x05,0x02,0x00]
+0x08,0x00,0x32,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_f16_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0x33,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0x33,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_f16_e64 s[12:13], v1, v2 ; encoding: [0x0c,0x00,0x34,0xd0,0x01,0x05,0x02,0x00]
+0x0c,0x00,0x34,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lg_f16_e64 s[14:15], v1, v2 ; encoding: [0x0e,0x00,0x35,0xd0,0x01,0x05,0x02,0x00]
+0x0e,0x00,0x35,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_f16_e64 s[16:17], v1, v2 ; encoding: [0x10,0x00,0x36,0xd0,0x01,0x05,0x02,0x00]
+0x10,0x00,0x36,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_o_f16_e64 s[18:19], v1, v2 ; encoding: [0x12,0x00,0x37,0xd0,0x01,0x05,0x02,0x00]
+0x12,0x00,0x37,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_u_f16_e64 s[20:21], v1, v2 ; encoding: [0x14,0x00,0x38,0xd0,0x01,0x05,0x02,0x00]
+0x14,0x00,0x38,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nge_f16_e64 s[22:23], v1, v2 ; encoding: [0x16,0x00,0x39,0xd0,0x01,0x05,0x02,0x00]
+0x16,0x00,0x39,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nlg_f16_e64 s[24:25], v1, v2 ; encoding: [0x18,0x00,0x3a,0xd0,0x01,0x05,0x02,0x00]
+0x18,0x00,0x3a,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ngt_f16_e64 s[26:27], v1, v2 ; encoding: [0x1a,0x00,0x3b,0xd0,0x01,0x05,0x02,0x00]
+0x1a,0x00,0x3b,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nle_f16_e64 s[28:29], v1, v2 ; encoding: [0x1c,0x00,0x3c,0xd0,0x01,0x05,0x02,0x00]
+0x1c,0x00,0x3c,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_neq_f16_e64 s[30:31], v1, v2 ; encoding: [0x1e,0x00,0x3d,0xd0,0x01,0x05,0x02,0x00]
+0x1e,0x00,0x3d,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nlt_f16_e64 s[32:33], v1, v2 ; encoding: [0x20,0x00,0x3e,0xd0,0x01,0x05,0x02,0x00]
+0x20,0x00,0x3e,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_tru_f16_e64 s[34:35], v1, v2 ; encoding: [0x22,0x00,0x3f,0xd0,0x01,0x05,0x02,0x00]
+0x22,0x00,0x3f,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_f32_e64 s[36:37], v1, v2 ; encoding: [0x24,0x00,0x50,0xd0,0x01,0x05,0x02,0x00]
+0x24,0x00,0x50,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_f32_e64 s[38:39], v1, v2 ; encoding: [0x26,0x00,0x51,0xd0,0x01,0x05,0x02,0x00]
+0x26,0x00,0x51,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_f32_e64 s[40:41], v1, v2 ; encoding: [0x28,0x00,0x52,0xd0,0x01,0x05,0x02,0x00]
+0x28,0x00,0x52,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_f32_e64 s[42:43], v1, v2 ; encoding: [0x2a,0x00,0x53,0xd0,0x01,0x05,0x02,0x00]
+0x2a,0x00,0x53,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_f32_e64 s[44:45], v1, v2 ; encoding: [0x2c,0x00,0x54,0xd0,0x01,0x05,0x02,0x00]
+0x2c,0x00,0x54,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lg_f32_e64 s[46:47], v1, v2 ; encoding: [0x2e,0x00,0x55,0xd0,0x01,0x05,0x02,0x00]
+0x2e,0x00,0x55,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_f32_e64 s[48:49], v1, v2 ; encoding: [0x30,0x00,0x56,0xd0,0x01,0x05,0x02,0x00]
+0x30,0x00,0x56,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_o_f32_e64 s[50:51], v1, v2 ; encoding: [0x32,0x00,0x57,0xd0,0x01,0x05,0x02,0x00]
+0x32,0x00,0x57,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_u_f32_e64 s[52:53], v1, v2 ; encoding: [0x34,0x00,0x58,0xd0,0x01,0x05,0x02,0x00]
+0x34,0x00,0x58,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nge_f32_e64 s[54:55], v1, v2 ; encoding: [0x36,0x00,0x59,0xd0,0x01,0x05,0x02,0x00]
+0x36,0x00,0x59,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nlg_f32_e64 s[56:57], v1, v2 ; encoding: [0x38,0x00,0x5a,0xd0,0x01,0x05,0x02,0x00]
+0x38,0x00,0x5a,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ngt_f32_e64 s[58:59], v1, v2 ; encoding: [0x3a,0x00,0x5b,0xd0,0x01,0x05,0x02,0x00]
+0x3a,0x00,0x5b,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nle_f32_e64 s[60:61], v1, v2 ; encoding: [0x3c,0x00,0x5c,0xd0,0x01,0x05,0x02,0x00]
+0x3c,0x00,0x5c,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_neq_f32_e64 s[62:63], v1, v2 ; encoding: [0x3e,0x00,0x5d,0xd0,0x01,0x05,0x02,0x00]
+0x3e,0x00,0x5d,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nlt_f32_e64 s[64:65], v1, v2 ; encoding: [0x40,0x00,0x5e,0xd0,0x01,0x05,0x02,0x00]
+0x40,0x00,0x5e,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_tru_f32_e64 s[66:67], v1, v2 ; encoding: [0x42,0x00,0x5f,0xd0,0x01,0x05,0x02,0x00]
+0x42,0x00,0x5f,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_f64_e64 s[68:69], v[1:2], v[2:3] ; encoding: [0x44,0x00,0x70,0xd0,0x01,0x05,0x02,0x00]
+0x44,0x00,0x70,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_f64_e64 s[70:71], v[1:2], v[2:3] ; encoding: [0x46,0x00,0x71,0xd0,0x01,0x05,0x02,0x00]
+0x46,0x00,0x71,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_f64_e64 s[72:73], v[1:2], v[2:3] ; encoding: [0x48,0x00,0x72,0xd0,0x01,0x05,0x02,0x00]
+0x48,0x00,0x72,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_f64_e64 s[74:75], v[1:2], v[2:3] ; encoding: [0x4a,0x00,0x73,0xd0,0x01,0x05,0x02,0x00]
+0x4a,0x00,0x73,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_f64_e64 s[76:77], v[1:2], v[2:3] ; encoding: [0x4c,0x00,0x74,0xd0,0x01,0x05,0x02,0x00]
+0x4c,0x00,0x74,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lg_f64_e64 s[78:79], v[1:2], v[2:3] ; encoding: [0x4e,0x00,0x75,0xd0,0x01,0x05,0x02,0x00]
+0x4e,0x00,0x75,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_f64_e64 s[80:81], v[1:2], v[2:3] ; encoding: [0x50,0x00,0x76,0xd0,0x01,0x05,0x02,0x00]
+0x50,0x00,0x76,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_o_f64_e64 s[82:83], v[1:2], v[2:3] ; encoding: [0x52,0x00,0x77,0xd0,0x01,0x05,0x02,0x00]
+0x52,0x00,0x77,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_u_f64_e64 s[84:85], v[1:2], v[2:3] ; encoding: [0x54,0x00,0x78,0xd0,0x01,0x05,0x02,0x00]
+0x54,0x00,0x78,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nge_f64_e64 s[86:87], v[1:2], v[2:3] ; encoding: [0x56,0x00,0x79,0xd0,0x01,0x05,0x02,0x00]
+0x56,0x00,0x79,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nlg_f64_e64 s[88:89], v[1:2], v[2:3] ; encoding: [0x58,0x00,0x7a,0xd0,0x01,0x05,0x02,0x00]
+0x58,0x00,0x7a,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ngt_f64_e64 s[90:91], v[1:2], v[2:3] ; encoding: [0x5a,0x00,0x7b,0xd0,0x01,0x05,0x02,0x00]
+0x5a,0x00,0x7b,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nle_f64_e64 s[92:93], v[1:2], v[2:3] ; encoding: [0x5c,0x00,0x7c,0xd0,0x01,0x05,0x02,0x00]
+0x5c,0x00,0x7c,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_neq_f64_e64 s[94:95], v[1:2], v[2:3] ; encoding: [0x5e,0x00,0x7d,0xd0,0x01,0x05,0x02,0x00]
+0x5e,0x00,0x7d,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nlt_f64_e64 s[96:97], v[1:2], v[2:3] ; encoding: [0x60,0x00,0x7e,0xd0,0x01,0x05,0x02,0x00]
+0x60,0x00,0x7e,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_tru_f64_e64 s[98:99], v[1:2], v[2:3] ; encoding: [0x62,0x00,0x7f,0xd0,0x01,0x05,0x02,0x00]
+0x62,0x00,0x7f,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_i16_e64 s[100:101], v1, v2 ; encoding: [0x64,0x00,0xb0,0xd0,0x01,0x05,0x02,0x00]
+0x64,0x00,0xb0,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_i16_e64 flat_scratch, v1, v2 ; encoding: [0x66,0x00,0xb1,0xd0,0x01,0x05,0x02,0x00]
+0x66,0x00,0xb1,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_i16_e64 xnack_mask, v1, v2 ; encoding: [0x68,0x00,0xb2,0xd0,0x01,0x05,0x02,0x00]
+0x68,0x00,0xb2,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_i16_e64 vcc, v1, v2 ; encoding: [0x6a,0x00,0xb3,0xd0,0x01,0x05,0x02,0x00]
+0x6a,0x00,0xb3,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_i16_e64 tba, v1, v2 ; encoding: [0x6c,0x00,0xb4,0xd0,0x01,0x05,0x02,0x00]
+0x6c,0x00,0xb4,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ne_i16_e64 tma, v1, v2 ; encoding: [0x6e,0x00,0xb5,0xd0,0x01,0x05,0x02,0x00]
+0x6e,0x00,0xb5,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_i16_e64 ttmp[0:1], v1, v2 ; encoding: [0x70,0x00,0xb6,0xd0,0x01,0x05,0x02,0x00]
+0x70,0x00,0xb6,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_t_i16_e64 ttmp[2:3], v1, v2 ; encoding: [0x72,0x00,0xb7,0xd0,0x01,0x05,0x02,0x00]
+0x72,0x00,0xb7,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_u16_e64 ttmp[4:5], v1, v2 ; encoding: [0x74,0x00,0xb8,0xd0,0x01,0x05,0x02,0x00]
+0x74,0x00,0xb8,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_u16_e64 ttmp[6:7], v1, v2 ; encoding: [0x76,0x00,0xb9,0xd0,0x01,0x05,0x02,0x00]
+0x76,0x00,0xb9,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_u16_e64 ttmp[8:9], v1, v2 ; encoding: [0x78,0x00,0xba,0xd0,0x01,0x05,0x02,0x00]
+0x78,0x00,0xba,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_u16_e64 ttmp[10:11], v1, v2 ; encoding: [0x7a,0x00,0xbb,0xd0,0x01,0x05,0x02,0x00]
+0x7a,0x00,0xbb,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_u16_e64 exec, v1, v2 ; encoding: [0x7e,0x00,0xbc,0xd0,0x01,0x05,0x02,0x00]
+0x7e,0x00,0xbc,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ne_u16_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xbd,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xbd,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_u16_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xbe,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xbe,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_t_u16_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xbf,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xbf,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd0,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd0,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd1,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd1,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd2,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd2,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd3,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd3,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd4,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd4,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ne_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd5,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd5,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd6,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd6,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_t_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd7,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd7,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd8,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd8,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd9,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd9,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xda,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xda,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xdb,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xdb,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xdc,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xdc,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ne_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xdd,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xdd,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xde,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xde,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_t_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xdf,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xdf,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf0,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf0,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf1,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf1,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf2,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf2,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf3,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf3,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf4,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf4,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ne_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf5,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf5,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf6,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf6,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_t_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf7,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf7,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf8,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf8,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf9,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf9,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xfa,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xfa,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xfb,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xfb,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xfc,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xfc,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ne_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xfd,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xfd,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xfe,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xfe,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_t_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xff,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xff,0xd0,0x01,0x05,0x02,0x00
+
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx9_vop3c_nowarn.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx9_vop3c_nowarn.txt
new file mode 100644
index 0000000..0c4f107
--- /dev/null
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx9_vop3c_nowarn.txt
@@ -0,0 +1,402 @@
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx900 -disassemble -show-encoding < %s 2>&1 | FileCheck -strict-whitespace %s
+
+# In GFX10+, v_cmpx_* use EXEC as the implicit dst. The disassembler issues a warning when the dst
+# is not 0x7e (EXEC). In GFX9 and earlier, these instructions have an explicit dst. Therefore, such
+# warnings should not be issued.
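+#
+# Note that stderr is redirected into FileCheck (2>&1 in the RUN line above), so
+# any warning the disassembler did emit would be caught by the CHECK-NOT lines.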
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmp_class_f32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0x10,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0x10,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmp_class_f32_e64 flat_scratch, v1, v2 ; encoding: [0x66,0x00,0x10,0xd0,0x01,0x05,0x02,0x00]
+0x66,0x00,0x10,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmp_class_f32_e64 vcc, v1, v2 ; encoding: [0x6a,0x00,0x10,0xd0,0x01,0x05,0x02,0x00]
+0x6a,0x00,0x10,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_class_f64_e64 s[0:1], v[1:2], v2 ; encoding: [0x00,0x00,0x13,0xd0,0x01,0x05,0x02,0x00]
+0x00,0x00,0x13,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_class_f16_e64 s[2:3], v1, v2 ; encoding: [0x02,0x00,0x15,0xd0,0x01,0x05,0x02,0x00]
+0x02,0x00,0x15,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_f16_e64 s[4:5], v1, v2 ; encoding: [0x04,0x00,0x30,0xd0,0x01,0x05,0x02,0x00]
+0x04,0x00,0x30,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_f16_e64 s[6:7], v1, v2 ; encoding: [0x06,0x00,0x31,0xd0,0x01,0x05,0x02,0x00]
+0x06,0x00,0x31,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_f16_e64 s[8:9], v1, v2 ; encoding: [0x08,0x00,0x32,0xd0,0x01,0x05,0x02,0x00]
+0x08,0x00,0x32,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_f16_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0x33,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0x33,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_f16_e64 s[12:13], v1, v2 ; encoding: [0x0c,0x00,0x34,0xd0,0x01,0x05,0x02,0x00]
+0x0c,0x00,0x34,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lg_f16_e64 s[14:15], v1, v2 ; encoding: [0x0e,0x00,0x35,0xd0,0x01,0x05,0x02,0x00]
+0x0e,0x00,0x35,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_f16_e64 s[16:17], v1, v2 ; encoding: [0x10,0x00,0x36,0xd0,0x01,0x05,0x02,0x00]
+0x10,0x00,0x36,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_o_f16_e64 s[18:19], v1, v2 ; encoding: [0x12,0x00,0x37,0xd0,0x01,0x05,0x02,0x00]
+0x12,0x00,0x37,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_u_f16_e64 s[20:21], v1, v2 ; encoding: [0x14,0x00,0x38,0xd0,0x01,0x05,0x02,0x00]
+0x14,0x00,0x38,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nge_f16_e64 s[22:23], v1, v2 ; encoding: [0x16,0x00,0x39,0xd0,0x01,0x05,0x02,0x00]
+0x16,0x00,0x39,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nlg_f16_e64 s[24:25], v1, v2 ; encoding: [0x18,0x00,0x3a,0xd0,0x01,0x05,0x02,0x00]
+0x18,0x00,0x3a,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ngt_f16_e64 s[26:27], v1, v2 ; encoding: [0x1a,0x00,0x3b,0xd0,0x01,0x05,0x02,0x00]
+0x1a,0x00,0x3b,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nle_f16_e64 s[28:29], v1, v2 ; encoding: [0x1c,0x00,0x3c,0xd0,0x01,0x05,0x02,0x00]
+0x1c,0x00,0x3c,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_neq_f16_e64 s[30:31], v1, v2 ; encoding: [0x1e,0x00,0x3d,0xd0,0x01,0x05,0x02,0x00]
+0x1e,0x00,0x3d,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nlt_f16_e64 s[32:33], v1, v2 ; encoding: [0x20,0x00,0x3e,0xd0,0x01,0x05,0x02,0x00]
+0x20,0x00,0x3e,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_tru_f16_e64 s[34:35], v1, v2 ; encoding: [0x22,0x00,0x3f,0xd0,0x01,0x05,0x02,0x00]
+0x22,0x00,0x3f,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_f32_e64 s[36:37], v1, v2 ; encoding: [0x24,0x00,0x50,0xd0,0x01,0x05,0x02,0x00]
+0x24,0x00,0x50,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_f32_e64 s[38:39], v1, v2 ; encoding: [0x26,0x00,0x51,0xd0,0x01,0x05,0x02,0x00]
+0x26,0x00,0x51,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_f32_e64 s[40:41], v1, v2 ; encoding: [0x28,0x00,0x52,0xd0,0x01,0x05,0x02,0x00]
+0x28,0x00,0x52,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_f32_e64 s[42:43], v1, v2 ; encoding: [0x2a,0x00,0x53,0xd0,0x01,0x05,0x02,0x00]
+0x2a,0x00,0x53,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_f32_e64 s[44:45], v1, v2 ; encoding: [0x2c,0x00,0x54,0xd0,0x01,0x05,0x02,0x00]
+0x2c,0x00,0x54,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lg_f32_e64 s[46:47], v1, v2 ; encoding: [0x2e,0x00,0x55,0xd0,0x01,0x05,0x02,0x00]
+0x2e,0x00,0x55,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_f32_e64 s[48:49], v1, v2 ; encoding: [0x30,0x00,0x56,0xd0,0x01,0x05,0x02,0x00]
+0x30,0x00,0x56,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_o_f32_e64 s[50:51], v1, v2 ; encoding: [0x32,0x00,0x57,0xd0,0x01,0x05,0x02,0x00]
+0x32,0x00,0x57,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_u_f32_e64 s[52:53], v1, v2 ; encoding: [0x34,0x00,0x58,0xd0,0x01,0x05,0x02,0x00]
+0x34,0x00,0x58,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nge_f32_e64 s[54:55], v1, v2 ; encoding: [0x36,0x00,0x59,0xd0,0x01,0x05,0x02,0x00]
+0x36,0x00,0x59,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nlg_f32_e64 s[56:57], v1, v2 ; encoding: [0x38,0x00,0x5a,0xd0,0x01,0x05,0x02,0x00]
+0x38,0x00,0x5a,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ngt_f32_e64 s[58:59], v1, v2 ; encoding: [0x3a,0x00,0x5b,0xd0,0x01,0x05,0x02,0x00]
+0x3a,0x00,0x5b,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nle_f32_e64 s[60:61], v1, v2 ; encoding: [0x3c,0x00,0x5c,0xd0,0x01,0x05,0x02,0x00]
+0x3c,0x00,0x5c,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_neq_f32_e64 s[62:63], v1, v2 ; encoding: [0x3e,0x00,0x5d,0xd0,0x01,0x05,0x02,0x00]
+0x3e,0x00,0x5d,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nlt_f32_e64 s[64:65], v1, v2 ; encoding: [0x40,0x00,0x5e,0xd0,0x01,0x05,0x02,0x00]
+0x40,0x00,0x5e,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_f64_e64 s[66:67], v[1:2], v[2:3] ; encoding: [0x42,0x00,0x70,0xd0,0x01,0x05,0x02,0x00]
+0x42,0x00,0x70,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_f64_e64 s[68:69], v[1:2], v[2:3] ; encoding: [0x44,0x00,0x72,0xd0,0x01,0x05,0x02,0x00]
+0x44,0x00,0x72,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_f64_e64 s[70:71], v[1:2], v[2:3] ; encoding: [0x46,0x00,0x73,0xd0,0x01,0x05,0x02,0x00]
+0x46,0x00,0x73,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_f64_e64 s[72:73], v[1:2], v[2:3] ; encoding: [0x48,0x00,0x74,0xd0,0x01,0x05,0x02,0x00]
+0x48,0x00,0x74,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lg_f64_e64 s[74:75], v[1:2], v[2:3] ; encoding: [0x4a,0x00,0x75,0xd0,0x01,0x05,0x02,0x00]
+0x4a,0x00,0x75,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_f64_e64 s[76:77], v[1:2], v[2:3] ; encoding: [0x4c,0x00,0x76,0xd0,0x01,0x05,0x02,0x00]
+0x4c,0x00,0x76,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_o_f64_e64 s[78:79], v[1:2], v[2:3] ; encoding: [0x4e,0x00,0x77,0xd0,0x01,0x05,0x02,0x00]
+0x4e,0x00,0x77,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_u_f64_e64 s[80:81], v[1:2], v[2:3] ; encoding: [0x50,0x00,0x78,0xd0,0x01,0x05,0x02,0x00]
+0x50,0x00,0x78,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nge_f64_e64 s[82:83], v[1:2], v[2:3] ; encoding: [0x52,0x00,0x79,0xd0,0x01,0x05,0x02,0x00]
+0x52,0x00,0x79,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nlg_f64_e64 s[84:85], v[1:2], v[2:3] ; encoding: [0x54,0x00,0x7a,0xd0,0x01,0x05,0x02,0x00]
+0x54,0x00,0x7a,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ngt_f64_e64 s[86:87], v[1:2], v[2:3] ; encoding: [0x56,0x00,0x7b,0xd0,0x01,0x05,0x02,0x00]
+0x56,0x00,0x7b,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nle_f64_e64 s[88:89], v[1:2], v[2:3] ; encoding: [0x58,0x00,0x7c,0xd0,0x01,0x05,0x02,0x00]
+0x58,0x00,0x7c,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_neq_f64_e64 s[90:91], v[1:2], v[2:3] ; encoding: [0x5a,0x00,0x7d,0xd0,0x01,0x05,0x02,0x00]
+0x5a,0x00,0x7d,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nlt_f64_e64 s[92:93], v[1:2], v[2:3] ; encoding: [0x5c,0x00,0x7e,0xd0,0x01,0x05,0x02,0x00]
+0x5c,0x00,0x7e,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_tru_f64_e64 s[94:95], v[1:2], v[2:3] ; encoding: [0x5e,0x00,0x7f,0xd0,0x01,0x05,0x02,0x00]
+0x5e,0x00,0x7f,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_i16_e64 s[96:97], v1, v2 ; encoding: [0x60,0x00,0xb0,0xd0,0x01,0x05,0x02,0x00]
+0x60,0x00,0xb0,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_i16_e64 s[98:99], v1, v2 ; encoding: [0x62,0x00,0xb1,0xd0,0x01,0x05,0x02,0x00]
+0x62,0x00,0xb1,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_i16_e64 s[100:101], v1, v2 ; encoding: [0x64,0x00,0xb2,0xd0,0x01,0x05,0x02,0x00]
+0x64,0x00,0xb2,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_i16_e64 flat_scratch, v1, v2 ; encoding: [0x66,0x00,0xb3,0xd0,0x01,0x05,0x02,0x00]
+0x66,0x00,0xb3,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_i16_e64 xnack_mask, v1, v2 ; encoding: [0x68,0x00,0xb4,0xd0,0x01,0x05,0x02,0x00]
+0x68,0x00,0xb4,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ne_i16_e64 vcc, v1, v2 ; encoding: [0x6a,0x00,0xb5,0xd0,0x01,0x05,0x02,0x00]
+0x6a,0x00,0xb5,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_i16_e64 ttmp[0:1], v1, v2 ; encoding: [0x6c,0x00,0xb6,0xd0,0x01,0x05,0x02,0x00]
+0x6c,0x00,0xb6,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_t_i16_e64 ttmp[2:3], v1, v2 ; encoding: [0x6e,0x00,0xb7,0xd0,0x01,0x05,0x02,0x00]
+0x6e,0x00,0xb7,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_u16_e64 ttmp[4:5], v1, v2 ; encoding: [0x70,0x00,0xb8,0xd0,0x01,0x05,0x02,0x00]
+0x70,0x00,0xb8,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_u16_e64 ttmp[6:7], v1, v2 ; encoding: [0x72,0x00,0xb9,0xd0,0x01,0x05,0x02,0x00]
+0x72,0x00,0xb9,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_u16_e64 ttmp[8:9], v1, v2 ; encoding: [0x74,0x00,0xba,0xd0,0x01,0x05,0x02,0x00]
+0x74,0x00,0xba,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_u16_e64 ttmp[10:11], v1, v2 ; encoding: [0x76,0x00,0xbb,0xd0,0x01,0x05,0x02,0x00]
+0x76,0x00,0xbb,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_u16_e64 ttmp[12:13], v1, v2 ; encoding: [0x78,0x00,0xbc,0xd0,0x01,0x05,0x02,0x00]
+0x78,0x00,0xbc,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ne_u16_e64 ttmp[14:15], v1, v2 ; encoding: [0x7a,0x00,0xbd,0xd0,0x01,0x05,0x02,0x00]
+0x7a,0x00,0xbd,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_u16_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xbe,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xbe,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_t_u16_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xbf,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xbf,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd0,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd0,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd1,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd1,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd2,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd2,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd3,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd3,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd4,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd4,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ne_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd5,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd5,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd6,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd6,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_t_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd7,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd7,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd8,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd8,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd9,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd9,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xda,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xda,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xdb,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xdb,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xdc,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xdc,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ne_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xdd,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xdd,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xde,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xde,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_t_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xdf,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xdf,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf0,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf0,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf1,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf1,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf2,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf2,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf3,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf3,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf4,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf4,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ne_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf5,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf5,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf6,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf6,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_t_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf7,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf7,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf8,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf8,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf9,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf9,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xfa,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xfa,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xfb,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xfb,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xfc,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xfc,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ne_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xfd,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xfd,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xfe,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xfe,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_t_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xff,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xff,0xd0,0x01,0x05,0x02,0x00
+
diff --git a/llvm/test/Other/loop-pm-invalidation.ll b/llvm/test/Other/loop-pm-invalidation.ll
index 4bead0b..25552f7 100644
--- a/llvm/test/Other/loop-pm-invalidation.ll
+++ b/llvm/test/Other/loop-pm-invalidation.ll
@@ -16,11 +16,6 @@
; RUN: opt -disable-output -disable-verify -verify-analysis-invalidation=0 -debug-pass-manager %s -aa-pipeline= 2>&1 \
; RUN: -passes='loop(no-op-loop,loop-deletion),invalidate<scalar-evolution>,loop(no-op-loop)' \
; RUN: | FileCheck %s --check-prefix=CHECK-SCEV-INV-AFTER-DELETE
-;
-; Test that BFI is invalidated after the loop adapter if any of the loop passes
-; invalidated it.
-; RUN: opt -disable-output -disable-verify -verify-analysis-invalidation=0 -debug-pass-manager %s -aa-pipeline= 2>&1 \
-; RUN: -O1 | FileCheck %s --check-prefix=CHECK-BFI-INV
define void @no_loops() {
; CHECK-LOOP-INV: Running pass: LoopSimplifyPass
@@ -247,28 +242,3 @@ l0.header:
exit:
ret void
}
-
-; CHECK-BFI-INV-LABEL: Running analysis: OuterAnalysisManagerProxy<{{.*}}> on loop %l0.header in function simplifiable_loop
-; CHECK-BFI-INV-NEXT: Running pass: LoopInstSimplifyPass on loop %l0.header in function simplifiable_loop
-; CHECK-BFI-INV-NEXT: Running pass: LoopSimplifyCFGPass on loop %l0.header in function simplifiable_loop
-; CHECK-BFI-INV-NEXT: Running pass: LICMPass on loop %l0.header in function simplifiable_loop
-; CHECK-BFI-INV-NEXT: Running pass: LoopRotatePass on loop %l0.header in function simplifiable_loop
-; CHECK-BFI-INV-NEXT: Running pass: LICMPass on loop %l0.header in function simplifiable_loop
-; CHECK-BFI-INV-NEXT: Running pass: SimpleLoopUnswitchPass on loop %l0.header in function simplifiable_loop
-; CHECK-BFI-INV-NEXT: Invalidating analysis: PostDominatorTreeAnalysis on simplifiable_loop
-; CHECK-BFI-INV-NEXT: Invalidating analysis: BranchProbabilityAnalysis on simplifiable_loop
-; CHECK-BFI-INV-NEXT: Invalidating analysis: BlockFrequencyAnalysis on simplifiable_loop
-; CHECK-BFI-INV-NEXT: Running pass: SimplifyCFGPass on simplifiable_loop (5 instructions)
-
-define void @simplifiable_loop(i1 %c) !prof !0 {
-entry:
- br label %l0.header
-
-l0.header:
- br label %l0.latch
-
-l0.latch:
- br i1 %c, label %l0.header, label %l0.latch
-}
-
-!0 = !{!"function_entry_count", i64 1}
diff --git a/llvm/test/Other/new-pm-defaults.ll b/llvm/test/Other/new-pm-defaults.ll
index 94e860b..65b96c8 100644
--- a/llvm/test/Other/new-pm-defaults.ll
+++ b/llvm/test/Other/new-pm-defaults.ll
@@ -186,7 +186,6 @@
; CHECK-O-NEXT: Running pass: LoopRotatePass
; CHECK-O-NEXT: Running pass: LICM
; CHECK-O-NEXT: Running pass: SimpleLoopUnswitchPass
-; CHECK-O-NEXT: Running analysis: OuterAnalysisManagerProxy
; CHECK-O-NEXT: Running pass: SimplifyCFGPass
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-O-NEXT: Running pass: LoopSimplifyPass
diff --git a/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll b/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll
index a08a140..3a0fffe 100644
--- a/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll
@@ -114,7 +114,6 @@
; CHECK-O-NEXT: Running pass: LoopRotatePass
; CHECK-O-NEXT: Running pass: LICM
; CHECK-O-NEXT: Running pass: SimpleLoopUnswitchPass
-; CHECK-O-NEXT: Running analysis: OuterAnalysisManagerProxy
; CHECK-O-NEXT: Running pass: SimplifyCFGPass
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-O-NEXT: Running pass: LoopSimplifyPass
diff --git a/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll
index d9e2dd3..4623edc 100644
--- a/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll
@@ -100,7 +100,6 @@
; CHECK-O-NEXT: Running pass: LoopRotatePass
; CHECK-O-NEXT: Running pass: LICM
; CHECK-O-NEXT: Running pass: SimpleLoopUnswitchPass
-; CHECK-O-NEXT: Running analysis: OuterAnalysisManagerProxy
; CHECK-O-NEXT: Running pass: SimplifyCFGPass
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-O-NEXT: Running pass: LoopSimplifyPass
diff --git a/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll
index 2f6fa4b..590afd9 100644
--- a/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll
@@ -109,7 +109,6 @@
; CHECK-O-NEXT: Running pass: LoopRotatePass
; CHECK-O-NEXT: Running pass: LICM
; CHECK-O-NEXT: Running pass: SimpleLoopUnswitchPass
-; CHECK-O-NEXT: Running analysis: OuterAnalysisManagerProxy
; CHECK-O-NEXT: Running pass: SimplifyCFGPass
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-O-NEXT: Running pass: LoopSimplifyPass
diff --git a/llvm/test/Other/new-pm-thinlto-prelink-defaults.ll b/llvm/test/Other/new-pm-thinlto-prelink-defaults.ll
index 5aacd26..dd6acd2 100644
--- a/llvm/test/Other/new-pm-thinlto-prelink-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-prelink-defaults.ll
@@ -146,7 +146,6 @@
; CHECK-O-NEXT: Running pass: LoopRotatePass
; CHECK-O-NEXT: Running pass: LICM
; CHECK-O-NEXT: Running pass: SimpleLoopUnswitchPass
-; CHECK-O-NEXT: Running analysis: OuterAnalysisManagerProxy
; CHECK-O-NEXT: Running pass: SimplifyCFGPass
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-O-NEXT: Running pass: LoopSimplifyPass
diff --git a/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll
index f6a9406..ee05452 100644
--- a/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll
@@ -149,7 +149,6 @@
; CHECK-O-NEXT: Running pass: LoopRotatePass
; CHECK-O-NEXT: Running pass: LICM
; CHECK-O-NEXT: Running pass: SimpleLoopUnswitchPass
-; CHECK-O-NEXT: Running analysis: OuterAnalysisManagerProxy
; CHECK-O-NEXT: Running pass: SimplifyCFGPass
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-O-NEXT: Running pass: LoopSimplifyPass
diff --git a/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll
index 48a9433..fd95e94 100644
--- a/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll
@@ -114,7 +114,6 @@
; CHECK-O-NEXT: Running pass: LoopRotatePass
; CHECK-O-NEXT: Running pass: LICM
; CHECK-O-NEXT: Running pass: SimpleLoopUnswitchPass
-; CHECK-O-NEXT: Running analysis: OuterAnalysisManagerProxy
; CHECK-O-NEXT: Running pass: SimplifyCFGPass
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-O-NEXT: Running pass: LoopSimplifyPass
diff --git a/llvm/test/Transforms/IndVarSimplify/pointer-loop-guards.ll b/llvm/test/Transforms/IndVarSimplify/pointer-loop-guards.ll
index 9371fe2..dbd572d 100644
--- a/llvm/test/Transforms/IndVarSimplify/pointer-loop-guards.ll
+++ b/llvm/test/Transforms/IndVarSimplify/pointer-loop-guards.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
; RUN: opt -p indvars -S %s | FileCheck %s
+; RUN: opt -p indvars -data-layout='n32:64' -S %s | FileCheck --check-prefix=N32 %s
declare i1 @cond()
@@ -28,6 +29,32 @@ define i64 @test_ptr_compare_guard(ptr %start, ptr %end) {
; CHECK-NEXT: [[RES:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[RES_PH]], %[[EXIT_LOOPEXIT]] ]
; CHECK-NEXT: ret i64 [[RES]]
;
+; N32-LABEL: define i64 @test_ptr_compare_guard(
+; N32-SAME: ptr [[START:%.*]], ptr [[END:%.*]]) {
+; N32-NEXT: [[ENTRY:.*]]:
+; N32-NEXT: [[START2:%.*]] = ptrtoint ptr [[START]] to i64
+; N32-NEXT: [[END1:%.*]] = ptrtoint ptr [[END]] to i64
+; N32-NEXT: [[C_0:%.*]] = icmp eq ptr [[START]], [[END]]
+; N32-NEXT: br i1 [[C_0]], label %[[EXIT:.*]], label %[[LOOP_HEADER_PREHEADER:.*]]
+; N32: [[LOOP_HEADER_PREHEADER]]:
+; N32-NEXT: [[TMP0:%.*]] = add i64 [[END1]], -1
+; N32-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[START2]]
+; N32-NEXT: br label %[[LOOP_HEADER:.*]]
+; N32: [[LOOP_HEADER]]:
+; N32-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[PTR_IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ], [ [[START]], %[[LOOP_HEADER_PREHEADER]] ]
+; N32-NEXT: [[C_1:%.*]] = call i1 @cond()
+; N32-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[EXIT_LOOPEXIT:.*]]
+; N32: [[LOOP_LATCH]]:
+; N32-NEXT: [[PTR_IV_NEXT]] = getelementptr i8, ptr [[PTR_IV]], i64 1
+; N32-NEXT: [[C_2:%.*]] = icmp eq ptr [[PTR_IV_NEXT]], [[END]]
+; N32-NEXT: br i1 [[C_2]], label %[[EXIT_LOOPEXIT]], label %[[LOOP_HEADER]]
+; N32: [[EXIT_LOOPEXIT]]:
+; N32-NEXT: [[RES_PH:%.*]] = phi i64 [ 0, %[[LOOP_HEADER]] ], [ [[TMP1]], %[[LOOP_LATCH]] ]
+; N32-NEXT: br label %[[EXIT]]
+; N32: [[EXIT]]:
+; N32-NEXT: [[RES:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[RES_PH]], %[[EXIT_LOOPEXIT]] ]
+; N32-NEXT: ret i64 [[RES]]
+;
entry:
%c.0 = icmp eq ptr %start, %end
br i1 %c.0, label %exit, label %loop.header
@@ -48,3 +75,149 @@ exit:
%res = phi i64 [ 0, %entry ], [ %i64.iv, %loop.latch ], [ 0, %loop.header ]
ret i64 %res
}
+
+define void @test_sub_cmp(ptr align 8 %start, ptr %end) {
+; CHECK-LABEL: define void @test_sub_cmp(
+; CHECK-SAME: ptr align 8 [[START:%.*]], ptr [[END:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[START_INT:%.*]] = ptrtoint ptr [[START]] to i64
+; CHECK-NEXT: [[END_INT:%.*]] = ptrtoint ptr [[END]] to i64
+; CHECK-NEXT: [[PTR_DIFF:%.*]] = sub i64 [[START_INT]], [[END_INT]]
+; CHECK-NEXT: [[CMP_ENTRY:%.*]] = icmp eq ptr [[START]], [[END]]
+; CHECK-NEXT: br i1 [[CMP_ENTRY]], label %[[EXIT:.*]], label %[[LOOP_HEADER_PREHEADER:.*]]
+; CHECK: [[LOOP_HEADER_PREHEADER]]:
+; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
+; CHECK: [[LOOP_HEADER]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ], [ 0, %[[LOOP_HEADER_PREHEADER]] ]
+; CHECK-NEXT: [[C_1:%.*]] = call i1 @cond()
+; CHECK-NEXT: br i1 [[C_1]], label %[[EXIT_EARLY:.*]], label %[[LOOP_LATCH]]
+; CHECK: [[LOOP_LATCH]]:
+; CHECK-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 1
+; CHECK-NEXT: [[CMP_LATCH:%.*]] = icmp ult i64 [[IV_NEXT]], [[PTR_DIFF]]
+; CHECK-NEXT: br i1 [[CMP_LATCH]], label %[[LOOP_HEADER]], label %[[EXIT_LOOPEXIT:.*]]
+; CHECK: [[EXIT_EARLY]]:
+; CHECK-NEXT: br label %[[EXIT]]
+; CHECK: [[EXIT_LOOPEXIT]]:
+; CHECK-NEXT: br label %[[EXIT]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+; N32-LABEL: define void @test_sub_cmp(
+; N32-SAME: ptr align 8 [[START:%.*]], ptr [[END:%.*]]) {
+; N32-NEXT: [[ENTRY:.*:]]
+; N32-NEXT: [[START_INT:%.*]] = ptrtoint ptr [[START]] to i64
+; N32-NEXT: [[END_INT:%.*]] = ptrtoint ptr [[END]] to i64
+; N32-NEXT: [[PTR_DIFF:%.*]] = sub i64 [[START_INT]], [[END_INT]]
+; N32-NEXT: [[CMP_ENTRY:%.*]] = icmp eq ptr [[START]], [[END]]
+; N32-NEXT: br i1 [[CMP_ENTRY]], label %[[EXIT:.*]], label %[[LOOP_HEADER_PREHEADER:.*]]
+; N32: [[LOOP_HEADER_PREHEADER]]:
+; N32-NEXT: br label %[[LOOP_HEADER:.*]]
+; N32: [[LOOP_HEADER]]:
+; N32-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ], [ 0, %[[LOOP_HEADER_PREHEADER]] ]
+; N32-NEXT: [[C_1:%.*]] = call i1 @cond()
+; N32-NEXT: br i1 [[C_1]], label %[[EXIT_EARLY:.*]], label %[[LOOP_LATCH]]
+; N32: [[LOOP_LATCH]]:
+; N32-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 1
+; N32-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], [[PTR_DIFF]]
+; N32-NEXT: br i1 [[EXITCOND]], label %[[LOOP_HEADER]], label %[[EXIT_LOOPEXIT:.*]]
+; N32: [[EXIT_EARLY]]:
+; N32-NEXT: br label %[[EXIT]]
+; N32: [[EXIT_LOOPEXIT]]:
+; N32-NEXT: br label %[[EXIT]]
+; N32: [[EXIT]]:
+; N32-NEXT: ret void
+;
+entry:
+ %start.int = ptrtoint ptr %start to i64
+ %end.int = ptrtoint ptr %end to i64
+ %ptr.diff = sub i64 %start.int, %end.int
+ %cmp.entry = icmp eq ptr %start, %end
+ br i1 %cmp.entry, label %exit, label %loop.header
+
+loop.header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
+ %c.1 = call i1 @cond()
+ br i1 %c.1, label %exit.early, label %loop.latch
+
+loop.latch:
+ %iv.next = add i64 %iv, 1
+ %cmp.latch = icmp ult i64 %iv.next, %ptr.diff
+ br i1 %cmp.latch, label %loop.header, label %exit
+
+exit.early:
+ br label %exit
+
+exit:
+ ret void
+}
+
+
+define void @test_ptr_diff_with_assume(ptr align 8 %start, ptr align 8 %end, ptr %P) {
+; CHECK-LABEL: define void @test_ptr_diff_with_assume(
+; CHECK-SAME: ptr align 8 [[START:%.*]], ptr align 8 [[END:%.*]], ptr [[P:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[START_INT:%.*]] = ptrtoint ptr [[START]] to i64
+; CHECK-NEXT: [[END_INT:%.*]] = ptrtoint ptr [[END]] to i64
+; CHECK-NEXT: [[PTR_DIFF:%.*]] = sub i64 [[START_INT]], [[END_INT]]
+; CHECK-NEXT: [[DIFF_CMP:%.*]] = icmp ult i64 [[PTR_DIFF]], 2
+; CHECK-NEXT: call void @llvm.assume(i1 [[DIFF_CMP]])
+; CHECK-NEXT: [[COMPUTED_END:%.*]] = getelementptr i8, ptr [[START]], i64 [[PTR_DIFF]]
+; CHECK-NEXT: [[ENTRY_CMP:%.*]] = icmp eq ptr [[START]], [[END]]
+; CHECK-NEXT: br i1 [[ENTRY_CMP]], label %[[EXIT:.*]], label %[[LOOP_BODY_PREHEADER:.*]]
+; CHECK: [[LOOP_BODY_PREHEADER]]:
+; CHECK-NEXT: br label %[[LOOP_BODY:.*]]
+; CHECK: [[LOOP_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi ptr [ [[IV_NEXT:%.*]], %[[LOOP_BODY]] ], [ [[START]], %[[LOOP_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = call i1 @cond()
+; CHECK-NEXT: [[IV_NEXT]] = getelementptr i8, ptr [[IV]], i64 1
+; CHECK-NEXT: [[LOOP_CMP:%.*]] = icmp eq ptr [[IV_NEXT]], [[COMPUTED_END]]
+; CHECK-NEXT: br i1 [[LOOP_CMP]], label %[[EXIT_LOOPEXIT:.*]], label %[[LOOP_BODY]]
+; CHECK: [[EXIT_LOOPEXIT]]:
+; CHECK-NEXT: br label %[[EXIT]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+; N32-LABEL: define void @test_ptr_diff_with_assume(
+; N32-SAME: ptr align 8 [[START:%.*]], ptr align 8 [[END:%.*]], ptr [[P:%.*]]) {
+; N32-NEXT: [[ENTRY:.*:]]
+; N32-NEXT: [[START_INT:%.*]] = ptrtoint ptr [[START]] to i64
+; N32-NEXT: [[END_INT:%.*]] = ptrtoint ptr [[END]] to i64
+; N32-NEXT: [[PTR_DIFF:%.*]] = sub i64 [[START_INT]], [[END_INT]]
+; N32-NEXT: [[DIFF_CMP:%.*]] = icmp ult i64 [[PTR_DIFF]], 2
+; N32-NEXT: call void @llvm.assume(i1 [[DIFF_CMP]])
+; N32-NEXT: [[COMPUTED_END:%.*]] = getelementptr i8, ptr [[START]], i64 [[PTR_DIFF]]
+; N32-NEXT: [[ENTRY_CMP:%.*]] = icmp eq ptr [[START]], [[END]]
+; N32-NEXT: br i1 [[ENTRY_CMP]], label %[[EXIT:.*]], label %[[LOOP_BODY_PREHEADER:.*]]
+; N32: [[LOOP_BODY_PREHEADER]]:
+; N32-NEXT: br label %[[LOOP_BODY:.*]]
+; N32: [[LOOP_BODY]]:
+; N32-NEXT: [[IV:%.*]] = phi ptr [ [[IV_NEXT:%.*]], %[[LOOP_BODY]] ], [ [[START]], %[[LOOP_BODY_PREHEADER]] ]
+; N32-NEXT: [[TMP0:%.*]] = call i1 @cond()
+; N32-NEXT: [[IV_NEXT]] = getelementptr i8, ptr [[IV]], i64 1
+; N32-NEXT: [[LOOP_CMP:%.*]] = icmp eq ptr [[IV_NEXT]], [[COMPUTED_END]]
+; N32-NEXT: br i1 [[LOOP_CMP]], label %[[EXIT_LOOPEXIT:.*]], label %[[LOOP_BODY]]
+; N32: [[EXIT_LOOPEXIT]]:
+; N32-NEXT: br label %[[EXIT]]
+; N32: [[EXIT]]:
+; N32-NEXT: ret void
+;
+entry:
+ %start.int = ptrtoint ptr %start to i64
+ %end.int = ptrtoint ptr %end to i64
+ %ptr.diff = sub i64 %start.int, %end.int
+ %diff.cmp = icmp ult i64 %ptr.diff, 2
+ call void @llvm.assume(i1 %diff.cmp)
+ %computed.end = getelementptr i8, ptr %start, i64 %ptr.diff
+ %entry.cmp = icmp eq ptr %start, %end
+ br i1 %entry.cmp, label %exit, label %loop.body
+
+loop.body:
+ %iv = phi ptr [ %start, %entry ], [ %iv.next, %loop.body ]
+ call i1 @cond()
+ %iv.next = getelementptr i8, ptr %iv, i64 1
+ %loop.cmp = icmp eq ptr %iv.next, %computed.end
+ br i1 %loop.cmp, label %exit, label %loop.body
+
+exit:
+ ret void
+}
diff --git a/llvm/test/Transforms/InstCombine/icmp-trunc.ll b/llvm/test/Transforms/InstCombine/icmp-trunc.ll
index b85deab..ad76ef7 100644
--- a/llvm/test/Transforms/InstCombine/icmp-trunc.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-trunc.ll
@@ -3,6 +3,7 @@
; RUN: opt < %s -passes=instcombine -S -data-layout="n8" | FileCheck %s --check-prefixes=CHECK,DL8
declare void @use(i8)
+declare void @use2(i4)
define i1 @ult_2(i32 %x) {
; CHECK-LABEL: @ult_2(
@@ -785,3 +786,32 @@ define <2 x i1> @uge_nsw_non_splat(<2 x i32> %x) {
ret <2 x i1> %r
}
+define i1 @trunc_icmp(i8 %a0) {
+; CHECK-LABEL: @trunc_icmp(
+; CHECK-NEXT: [[TZ:%.*]] = tail call range(i8 0, 9) i8 @llvm.cttz.i8(i8 [[A0:%.*]], i1 false)
+; CHECK-NEXT: [[TR:%.*]] = trunc nuw i8 [[TZ]] to i4
+; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[A0]], 0
+; CHECK-NEXT: call void @use2(i4 [[TR]])
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %tz = tail call range(i8 0, 9) i8 @llvm.cttz.i8(i8 %a0, i1 false)
+ %tr = trunc i8 %tz to i4
+ %c = icmp eq i4 %tr, 8
+ call void @use2(i4 %tr)
+ ret i1 %c
+}
+
+define i1 @do_not_mask_trunc_eq_i32_i8(i32 %x) {
+; DL64-LABEL: @do_not_mask_trunc_eq_i32_i8(
+; DL64-NEXT: [[R:%.*]] = icmp eq i32 [[X:%.*]], 42
+; DL64-NEXT: ret i1 [[R]]
+;
+; DL8-LABEL: @do_not_mask_trunc_eq_i32_i8(
+; DL8-NEXT: [[T:%.*]] = trunc nuw i32 [[X:%.*]] to i8
+; DL8-NEXT: [[R:%.*]] = icmp eq i8 [[T]], 42
+; DL8-NEXT: ret i1 [[R]]
+;
+ %t = trunc nuw i32 %x to i8
+ %r = icmp eq i8 %t, 42
+ ret i1 %r
+}
diff --git a/llvm/test/Transforms/InstCombine/scmp.ll b/llvm/test/Transforms/InstCombine/scmp.ll
index 2bf22ae..c0be5b9 100644
--- a/llvm/test/Transforms/InstCombine/scmp.ll
+++ b/llvm/test/Transforms/InstCombine/scmp.ll
@@ -423,6 +423,86 @@ define i8 @scmp_from_select_eq_and_gt_commuted3(i32 %x, i32 %y) {
ret i8 %r
}
+; Commutative tests for (x != y) ? (x > y ? 1 : -1) : 0
+define i8 @scmp_from_select_ne_and_gt_commuted1(i32 %x, i32 %y) {
+; CHECK-LABEL: define i8 @scmp_from_select_ne_and_gt_commuted1(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.scmp.i8.i32(i32 [[Y]], i32 [[X]])
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %ne = icmp ne i32 %x, %y
+ %gt = icmp slt i32 %x, %y
+ %sel1 = select i1 %gt, i8 1, i8 -1
+ %r = select i1 %ne, i8 %sel1, i8 0
+ ret i8 %r
+}
+
+define i8 @scmp_from_select_ne_and_gt_commuted2(i32 %x, i32 %y) {
+; CHECK-LABEL: define i8 @scmp_from_select_ne_and_gt_commuted2(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.scmp.i8.i32(i32 [[Y]], i32 [[X]])
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %ne = icmp ne i32 %x, %y
+ %gt = icmp sgt i32 %x, %y
+ %sel1 = select i1 %gt, i8 -1, i8 1
+ %r = select i1 %ne, i8 %sel1, i8 0
+ ret i8 %r
+}
+
+define i8 @scmp_from_select_ne_and_gt_commuted3(i32 %x, i32 %y) {
+; CHECK-LABEL: define i8 @scmp_from_select_ne_and_gt_commuted3(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.scmp.i8.i32(i32 [[X]], i32 [[Y]])
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %ne = icmp ne i32 %x, %y
+ %gt = icmp sgt i32 %x, %y
+ %sel1 = select i1 %gt, i8 1, i8 -1
+ %r = select i1 %ne, i8 %sel1, i8 0
+ ret i8 %r
+}
+
+; Commutative tests for x != C ? (x > C - 1 ? 1 : -1) : 0
+define i8 @scmp_from_select_ne_const_and_gt_commuted1(i32 %x) {
+; CHECK-LABEL: define i8 @scmp_from_select_ne_const_and_gt_commuted1(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.scmp.i8.i32(i32 [[X]], i32 5)
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %ne = icmp ne i32 %x, 5
+ %gt = icmp sgt i32 %x, 4
+ %sel1 = select i1 %gt, i8 1, i8 -1
+ %r = select i1 %ne, i8 %sel1, i8 0
+ ret i8 %r
+}
+
+define i8 @scmp_from_select_ne_const_and_gt_commuted2(i32 %x) {
+; CHECK-LABEL: define i8 @scmp_from_select_ne_const_and_gt_commuted2(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.scmp.i8.i32(i32 [[X]], i32 5)
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %ne = icmp ne i32 %x, 5
+ %gt = icmp sgt i32 %x, 4
+ %sel1 = select i1 %gt, i8 1, i8 -1
+ %r = select i1 %ne, i8 %sel1, i8 0
+ ret i8 %r
+}
+
+define i8 @scmp_from_select_ne_const_and_gt_commuted3(i32 %x) {
+; CHECK-LABEL: define i8 @scmp_from_select_ne_const_and_gt_commuted3(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.scmp.i8.i32(i32 [[X]], i32 5)
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %ne = icmp ne i32 %x, 5
+ %gt = icmp sgt i32 %x, 4
+ %sel1 = select i1 %gt, i8 1, i8 -1
+ %r = select i1 %ne, i8 %sel1, i8 0
+ ret i8 %r
+}
+
define <3 x i2> @scmp_unary_shuffle_ops(<3 x i8> %x, <3 x i8> %y) {
; CHECK-LABEL: define <3 x i2> @scmp_unary_shuffle_ops(
; CHECK-SAME: <3 x i8> [[X:%.*]], <3 x i8> [[Y:%.*]]) {
@@ -436,6 +516,187 @@ define <3 x i2> @scmp_unary_shuffle_ops(<3 x i8> %x, <3 x i8> %y) {
ret <3 x i2> %r
}
+define i32 @scmp_sgt_slt(i32 %a) {
+; CHECK-LABEL: define i32 @scmp_sgt_slt(
+; CHECK-SAME: i32 [[A:%.*]]) {
+; CHECK-NEXT: [[A_LOBIT:%.*]] = ashr i32 [[A]], 31
+; CHECK-NEXT: [[CMP_INV:%.*]] = icmp slt i32 [[A]], 1
+; CHECK-NEXT: [[RETVAL_0:%.*]] = select i1 [[CMP_INV]], i32 [[A_LOBIT]], i32 1
+; CHECK-NEXT: ret i32 [[RETVAL_0]]
+;
+ %cmp = icmp sgt i32 %a, 0
+ %cmp1 = icmp slt i32 %a, 0
+ %. = select i1 %cmp1, i32 -1, i32 0
+ %retval.0 = select i1 %cmp, i32 1, i32 %.
+ ret i32 %retval.0
+}
+
+define i32 @scmp_zero_slt(i32 %a) {
+; CHECK-LABEL: define i32 @scmp_zero_slt(
+; CHECK-SAME: i32 [[A:%.*]]) {
+; CHECK-NEXT: [[RETVAL_0:%.*]] = call i32 @llvm.scmp.i32.i32(i32 [[A]], i32 0)
+; CHECK-NEXT: ret i32 [[RETVAL_0]]
+;
+ %cmp = icmp eq i32 %a, 0
+ %cmp1.inv = icmp slt i32 %a, 1
+ %. = select i1 %cmp1.inv, i32 -1, i32 1
+ %retval.0 = select i1 %cmp, i32 0, i32 %.
+ ret i32 %retval.0
+}
+
+define i32 @scmp_zero_sgt(i32 %a) {
+; CHECK-LABEL: define i32 @scmp_zero_sgt(
+; CHECK-SAME: i32 [[A:%.*]]) {
+; CHECK-NEXT: [[RETVAL_0:%.*]] = call i32 @llvm.scmp.i32.i32(i32 [[A]], i32 0)
+; CHECK-NEXT: ret i32 [[RETVAL_0]]
+;
+ %cmp = icmp eq i32 %a, 0
+ %cmp1.inv = icmp sgt i32 %a, -1
+ %. = select i1 %cmp1.inv, i32 1, i32 -1
+ %retval.0 = select i1 %cmp, i32 0, i32 %.
+ ret i32 %retval.0
+}
+
+
+define i32 @scmp_zero_sgt_1(i32 %a) {
+; CHECK-LABEL: define i32 @scmp_zero_sgt_1(
+; CHECK-SAME: i32 [[A:%.*]]) {
+; CHECK-NEXT: [[COND2:%.*]] = call i32 @llvm.scmp.i32.i32(i32 [[A]], i32 0)
+; CHECK-NEXT: ret i32 [[COND2]]
+;
+ %cmp = icmp eq i32 %a, 0
+ %cmp1 = icmp sgt i32 %a, -1
+ %cond = select i1 %cmp1, i32 1, i32 -1
+ %cond2 = select i1 %cmp, i32 0, i32 %cond
+ ret i32 %cond2
+}
+
+define i32 @scmp_zero_slt_1(i32 %a) {
+; CHECK-LABEL: define i32 @scmp_zero_slt_1(
+; CHECK-SAME: i32 [[A:%.*]]) {
+; CHECK-NEXT: [[COND2:%.*]] = call i32 @llvm.scmp.i32.i32(i32 [[A]], i32 0)
+; CHECK-NEXT: ret i32 [[COND2]]
+;
+ %cmp = icmp eq i32 %a, 0
+ %cmp1 = icmp slt i32 %a, 1
+ %cond = select i1 %cmp1, i32 -1, i32 1
+ %cond2 = select i1 %cmp, i32 0, i32 %cond
+ ret i32 %cond2
+}
+
+define i32 @scmp_zero_slt_neg(i32 %a) {
+; CHECK-LABEL: define i32 @scmp_zero_slt_neg(
+; CHECK-SAME: i32 [[A:%.*]]) {
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A]], 0
+; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[A]], -1
+; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP1]], i32 -1, i32 1
+; CHECK-NEXT: [[COND2:%.*]] = select i1 [[CMP]], i32 0, i32 [[COND]]
+; CHECK-NEXT: ret i32 [[COND2]]
+;
+ %cmp = icmp eq i32 %a, 0
+ %cmp1 = icmp slt i32 %a, -1
+ %cond = select i1 %cmp1, i32 -1, i32 1
+ %cond2 = select i1 %cmp, i32 0, i32 %cond
+ ret i32 %cond2
+}
+
+define i32 @scmp_zero_sgt_neg(i32 %a) {
+; CHECK-LABEL: define i32 @scmp_zero_sgt_neg(
+; CHECK-SAME: i32 [[A:%.*]]) {
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A]], 0
+; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[A]], 1
+; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP1]], i32 1, i32 -1
+; CHECK-NEXT: [[COND2:%.*]] = select i1 [[CMP]], i32 0, i32 [[COND]]
+; CHECK-NEXT: ret i32 [[COND2]]
+;
+ %cmp = icmp eq i32 %a, 0
+ %cmp1 = icmp sgt i32 %a, 1
+ %cond = select i1 %cmp1, i32 1, i32 -1
+ %cond2 = select i1 %cmp, i32 0, i32 %cond
+ ret i32 %cond2
+}
+
+define i32 @ucmp_ugt_ult_neg(i32 %a) {
+; CHECK-LABEL: define i32 @ucmp_ugt_ult_neg(
+; CHECK-SAME: i32 [[A:%.*]]) {
+; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp ne i32 [[A]], 0
+; CHECK-NEXT: [[RETVAL_0:%.*]] = zext i1 [[CMP_NOT]] to i32
+; CHECK-NEXT: ret i32 [[RETVAL_0]]
+;
+ %cmp = icmp ugt i32 %a, 0
+ %cmp1 = icmp ult i32 %a, 0
+ %. = select i1 %cmp1, i32 -1, i32 0
+ %retval.0 = select i1 %cmp, i32 1, i32 %.
+ ret i32 %retval.0
+}
+
+define i32 @ucmp_zero_ult_neg(i32 %a) {
+; CHECK-LABEL: define i32 @ucmp_zero_ult_neg(
+; CHECK-SAME: i32 [[A:%.*]]) {
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[A]], 0
+; CHECK-NEXT: [[RETVAL_0:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: ret i32 [[RETVAL_0]]
+;
+ %cmp = icmp eq i32 %a, 0
+ %cmp1.inv = icmp ult i32 %a, 1
+ %. = select i1 %cmp1.inv, i32 -1, i32 1
+ %retval.0 = select i1 %cmp, i32 0, i32 %.
+ ret i32 %retval.0
+}
+
+define i32 @ucmp_zero_ugt_neg(i32 %a) {
+; CHECK-LABEL: define i32 @ucmp_zero_ugt_neg(
+; CHECK-SAME: i32 [[A:%.*]]) {
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[A]], 0
+; CHECK-NEXT: [[RETVAL_0:%.*]] = sext i1 [[CMP]] to i32
+; CHECK-NEXT: ret i32 [[RETVAL_0]]
+;
+ %cmp = icmp eq i32 %a, 0
+ %cmp1.inv = icmp ugt i32 %a, -1
+ %. = select i1 %cmp1.inv, i32 1, i32 -1
+ %retval.0 = select i1 %cmp, i32 0, i32 %.
+ ret i32 %retval.0
+}
+
+define i32 @scmp_sgt_slt_ab(i32 %a, i32 %b) {
+; CHECK-LABEL: define i32 @scmp_sgt_slt_ab(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT: [[RETVAL_0:%.*]] = call i32 @llvm.scmp.i32.i32(i32 [[A]], i32 [[B]])
+; CHECK-NEXT: ret i32 [[RETVAL_0]]
+;
+ %cmp = icmp sgt i32 %a, %b
+ %cmp1 = icmp slt i32 %a, %b
+ %. = select i1 %cmp1, i32 -1, i32 0
+ %retval.0 = select i1 %cmp, i32 1, i32 %.
+ ret i32 %retval.0
+}
+
+define i32 @scmp_zero_slt_ab(i32 %a, i32 %b) {
+; CHECK-LABEL: define i32 @scmp_zero_slt_ab(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT: [[RETVAL_0:%.*]] = call i32 @llvm.scmp.i32.i32(i32 [[A]], i32 [[B]])
+; CHECK-NEXT: ret i32 [[RETVAL_0]]
+;
+ %cmp = icmp eq i32 %a, %b
+ %cmp1.inv = icmp slt i32 %a, %b
+ %. = select i1 %cmp1.inv, i32 -1, i32 1
+ %retval.0 = select i1 %cmp, i32 0, i32 %.
+ ret i32 %retval.0
+}
+
+define i32 @scmp_zero_sgt_ab(i32 %a, i32 %b) {
+; CHECK-LABEL: define i32 @scmp_zero_sgt_ab(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT: [[RETVAL_0:%.*]] = call i32 @llvm.scmp.i32.i32(i32 [[A]], i32 [[B]])
+; CHECK-NEXT: ret i32 [[RETVAL_0]]
+;
+ %cmp = icmp eq i32 %a, %b
+ %cmp1.inv = icmp sgt i32 %a, %b
+ %. = select i1 %cmp1.inv, i32 1, i32 -1
+ %retval.0 = select i1 %cmp, i32 0, i32 %.
+ ret i32 %retval.0
+}
+
; Negative test: true value of outer select is not zero
define i8 @scmp_from_select_eq_and_gt_neg1(i32 %x, i32 %y) {
; CHECK-LABEL: define i8 @scmp_from_select_eq_and_gt_neg1(
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/WebAssembly/any_all_true.ll b/llvm/test/Transforms/InstSimplify/ConstProp/WebAssembly/any_all_true.ll
index 7b30edb..71dad41 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/WebAssembly/any_all_true.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/WebAssembly/any_all_true.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -passes=instsimplify -S < %s | FileCheck %s
+; RUN: opt -passes=instsimplify -use-constant-int-for-fixed-length-splat -S < %s | FileCheck %s
; Test that calls to wasm intrinsics are constant folded
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/bitcount.ll b/llvm/test/Transforms/InstSimplify/ConstProp/bitcount.ll
index 68b45a94..f68b85e 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/bitcount.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/bitcount.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instsimplify -S | FileCheck %s
+; RUN: opt < %s -passes=instsimplify -use-constant-int-for-fixed-length-splat -use-constant-int-for-scalable-splat -S | FileCheck %s
declare i31 @llvm.ctpop.i31(i31 %val)
declare i32 @llvm.cttz.i32(i32 %val, i1)
@@ -120,6 +121,22 @@ define <2 x i31> @ctpop_vector() {
ret <2 x i31> %x
}
+define <2 x i31> @ctpop_vector_splat_v2i31() {
+; CHECK-LABEL: @ctpop_vector_splat_v2i31(
+; CHECK-NEXT: ret <2 x i31> splat (i31 1)
+;
+ %x = call <2 x i31> @llvm.ctpop.v2i31(<2 x i31> splat(i31 16))
+ ret <2 x i31> %x
+}
+
+define <vscale x 2 x i31> @ctpop_vector_splat_nxv2i31() {
+; CHECK-LABEL: @ctpop_vector_splat_nxv2i31(
+; CHECK-NEXT: ret <vscale x 2 x i31> splat (i31 1)
+;
+ %x = call <vscale x 2 x i31> @llvm.ctpop.nxv2i31(<vscale x 2 x i31> splat(i31 16))
+ ret <vscale x 2 x i31> %x
+}
+
define <2 x i31> @ctpop_vector_undef() {
; CHECK-LABEL: @ctpop_vector_undef(
; CHECK-NEXT: ret <2 x i31> zeroinitializer
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/bitreverse.ll b/llvm/test/Transforms/InstSimplify/ConstProp/bitreverse.ll
new file mode 100644
index 0000000..409141a
--- /dev/null
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/bitreverse.ll
@@ -0,0 +1,51 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt < %s -passes=instsimplify -S | FileCheck %s
+; RUN: opt < %s -passes=instsimplify -use-constant-int-for-fixed-length-splat -use-constant-int-for-scalable-splat -S | FileCheck %s
+
+define i16 @W() {
+; CHECK-LABEL: define i16 @W() {
+; CHECK-NEXT: ret i16 -32768
+;
+ %Z = call i16 @llvm.bitreverse.i16(i16 1)
+ ret i16 %Z
+}
+
+define i32 @X() {
+; CHECK-LABEL: define i32 @X() {
+; CHECK-NEXT: ret i32 -2147483648
+;
+ %Z = call i32 @llvm.bitreverse.i32(i32 1)
+ ret i32 %Z
+}
+
+define i64 @Y() {
+; CHECK-LABEL: define i64 @Y() {
+; CHECK-NEXT: ret i64 -9223372036854775808
+;
+ %Z = call i64 @llvm.bitreverse.i64(i64 1)
+ ret i64 %Z
+}
+
+define i80 @Z() {
+; CHECK-LABEL: define i80 @Z() {
+; CHECK-NEXT: ret i80 23777929115895377691656
+;
+ %Z = call i80 @llvm.bitreverse.i80(i80 76151636403560493650080)
+ ret i80 %Z
+}
+
+define <4 x i32> @bitreverse_splat_v4i32() {
+; CHECK-LABEL: define <4 x i32> @bitreverse_splat_v4i32() {
+; CHECK-NEXT: ret <4 x i32> splat (i32 -2147483648)
+;
+ %Z = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> splat(i32 1))
+ ret <4 x i32> %Z
+}
+
+define <vscale x 4 x i32> @bitreverse_splat_nxv4i32() {
+; CHECK-LABEL: define <vscale x 4 x i32> @bitreverse_splat_nxv4i32() {
+; CHECK-NEXT: ret <vscale x 4 x i32> splat (i32 -2147483648)
+;
+ %Z = call <vscale x 4 x i32> @llvm.bitreverse.nxv4i32(<vscale x 4 x i32> splat(i32 1))
+ ret <vscale x 4 x i32> %Z
+}
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/bswap.ll b/llvm/test/Transforms/InstSimplify/ConstProp/bswap.ll
index 42bb733..4db8ced 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/bswap.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/bswap.ll
@@ -2,6 +2,7 @@
; bswap should be constant folded when it is passed a constant argument
; RUN: opt < %s -passes=instsimplify -S | FileCheck %s
+; RUN: opt < %s -passes=instsimplify -use-constant-int-for-fixed-length-splat -use-constant-int-for-scalable-splat -S | FileCheck %s
declare i16 @llvm.bswap.i16(i16)
@@ -42,3 +43,19 @@ define i80 @Z() {
%Z = call i80 @llvm.bswap.i80( i80 76151636403560493650080 )
ret i80 %Z
}
+
+define <4 x i32> @bswap_splat_v4i32() {
+; CHECK-LABEL: define <4 x i32> @bswap_splat_v4i32() {
+; CHECK-NEXT: ret <4 x i32> splat (i32 16777216)
+;
+ %Z = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> splat(i32 1))
+ ret <4 x i32> %Z
+}
+
+define <vscale x 4 x i32> @bswap_splat_nxv4i32() {
+; CHECK-LABEL: define <vscale x 4 x i32> @bswap_splat_nxv4i32() {
+; CHECK-NEXT: ret <vscale x 4 x i32> splat (i32 16777216)
+;
+ %Z = call <vscale x 4 x i32> @llvm.bswap.nxv4i32(<vscale x 4 x i32> splat(i32 1))
+ ret <vscale x 4 x i32> %Z
+}
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/vecreduce.ll b/llvm/test/Transforms/InstSimplify/ConstProp/vecreduce.ll
index e994921..9f9e3f9 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/vecreduce.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/vecreduce.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instsimplify -S | FileCheck %s
+; RUN: opt < %s -passes=instsimplify -use-constant-int-for-fixed-length-splat -S | FileCheck %s
declare i32 @llvm.vector.reduce.add.v1i32(<1 x i32> %a)
declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %a)
diff --git a/llvm/test/Transforms/LoopPredication/preserve-bpi.ll b/llvm/test/Transforms/LoopPredication/preserve-bpi.ll
deleted file mode 100644
index 7fbb197..0000000
--- a/llvm/test/Transforms/LoopPredication/preserve-bpi.ll
+++ /dev/null
@@ -1,60 +0,0 @@
-; RUN: opt -mtriple=x86_64 -passes='loop-mssa(loop-predication,licm,simple-loop-unswitch<nontrivial>,loop-simplifycfg)' -debug-pass-manager -debug-only=branch-prob -S < %s 2>&1 | FileCheck %s
-
-; REQUIRES: asserts
-
-; This test is to solely check that we do not run BPI every single time loop
-; predication is invoked (since BPI is preserved as part of
-; LoopStandardAnalysisResults).
-declare void @llvm.experimental.guard(i1, ...)
-
-; CHECK: Running pass: LoopPredicationPass on loop
-; CHECK-NEXT: Running pass: LICMPass on loop
-; CHECK-NEXT: Running pass: SimpleLoopUnswitchPass on loop
-; CHECK-NEXT: Running analysis: OuterAnalysisManagerProxy
-; CHECK-NEXT: Running pass: LoopPredicationPass on loop
-; CHECK-NEXT: Running pass: LICMPass on loop
-; CHECK-NEXT: Running pass: SimpleLoopUnswitchPass on loop
-; CHECK-NEXT: Running pass: LoopSimplifyCFGPass on loop
-
-define i32 @unsigned_loop_0_to_n_ult_check(ptr %array, i32 %length, i32 %n) {
-entry:
- %tmp5 = icmp eq i32 %n, 0
- br i1 %tmp5, label %exit, label %loop.preheader
-
-loop.preheader: ; preds = %entry
- br label %loop
-
-loop: ; preds = %guarded, %loop.preheader
- %loop.acc = phi i32 [ %loop.acc.next, %guarded ], [ 0, %loop.preheader ]
- %i = phi i32 [ %i.next, %guarded ], [ 0, %loop.preheader ]
- %within.bounds = icmp ult i32 %i, %length
- %widenable_cond = call i1 @llvm.experimental.widenable.condition()
- %exiplicit_guard_cond = and i1 %within.bounds, %widenable_cond
- br i1 %exiplicit_guard_cond, label %guarded, label %deopt, !prof !0
-
-deopt: ; preds = %loop
- %deoptcall = call i32 (...) @llvm.experimental.deoptimize.i32(i32 9) [ "deopt"() ]
- ret i32 %deoptcall
-
-guarded: ; preds = %loop
- %i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
- %array.i = load i32, ptr %array.i.ptr, align 4
- %loop.acc.next = add i32 %loop.acc, %array.i
- %i.next = add nuw i32 %i, 1
- %continue = icmp ult i32 %i.next, %n
- br i1 %continue, label %loop, label %exit, !prof !2
-
-exit: ; preds = %guarded, %entry
- %result = phi i32 [ 0, %entry ], [ %loop.acc.next, %guarded ]
- ret i32 %result
-}
-
-declare i32 @llvm.experimental.deoptimize.i32(...)
-declare i1 @llvm.experimental.widenable.condition() #0
-
-attributes #0 = { inaccessiblememonly nounwind }
-
-!0 = !{!"branch_weights", i32 1048576, i32 1}
-!1 = !{i32 1, i32 -2147483648}
-!2 = !{!"branch_weights", i32 1024, i32 1}
diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll
index b106f99..1153d18 100644
--- a/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll
+++ b/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll
@@ -6,7 +6,7 @@
; Check that the addresses for a scalarized memory access are not extracted
; from a vector register.
-define i32 @foo(ptr nocapture %A) {
+define void @foo(ptr nocapture %A) {
; CHECK-LABEL: @foo(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[VECTOR_PH:%.*]]
@@ -27,7 +27,7 @@ define i32 @foo(ptr nocapture %A) {
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.end:
-; CHECK-NEXT: ret i32 poison
+; CHECK-NEXT: ret void
;
entry:
@@ -44,12 +44,12 @@ for.body:
br i1 %exitcond, label %for.end, label %for.body
for.end:
- ret i32 poison
+ ret void
}
; Check that a load of an address is scalarized.
-define i32 @foo1(ptr nocapture noalias %A, ptr nocapture %PtrPtr) {
+define void @foo1(ptr nocapture noalias %A, ptr nocapture %PtrPtr) {
; CHECK-LABEL: @foo1(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[VECTOR_PH:%.*]]
@@ -74,7 +74,7 @@ define i32 @foo1(ptr nocapture noalias %A, ptr nocapture %PtrPtr) {
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.end:
-; CHECK-NEXT: ret i32 poison
+; CHECK-NEXT: ret void
;
entry:
@@ -93,5 +93,5 @@ for.body:
br i1 %exitcond, label %for.end, label %for.body
for.end:
- ret i32 poison
+ ret void
}
diff --git a/llvm/test/Transforms/LoopVectorize/pr48832.ll b/llvm/test/Transforms/LoopVectorize/pr48832.ll
index b89be88..c6ebe85 100644
--- a/llvm/test/Transforms/LoopVectorize/pr48832.ll
+++ b/llvm/test/Transforms/LoopVectorize/pr48832.ll
@@ -23,7 +23,7 @@ for.body: ; preds = %for.cond
br i1 true, label %cond.false, label %land.rhs
land.rhs: ; preds = %for.body
- br i1 poison, label %cond.end, label %cond.false
+ br i1 false, label %cond.end, label %cond.false
cond.false: ; preds = %for.body, %land.rhs
br label %cond.end
diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/data-layout-multiply-fused.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/data-layout-multiply-fused.ll
index d281905..abd1d96 100644
--- a/llvm/test/Transforms/LowerMatrixIntrinsics/data-layout-multiply-fused.ll
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/data-layout-multiply-fused.ll
@@ -1,5 +1,4 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -passes=lower-matrix-intrinsics,instcombine -data-layout='p:128:128' -fuse-matrix-use-loops=false -fuse-matrix-tile-size=2 -matrix-allow-contract -force-fuse-matrix -verify-dom-info %s -S | FileCheck %s --check-prefix=PTR128
; RUN: opt -passes=lower-matrix-intrinsics,instcombine -data-layout='p:64:64' -fuse-matrix-use-loops=false -fuse-matrix-tile-size=2 -matrix-allow-contract -force-fuse-matrix -verify-dom-info %s -S | FileCheck %s --check-prefix=PTR64
; RUN: opt -passes=lower-matrix-intrinsics,instcombine -data-layout='p:32:32' -fuse-matrix-use-loops=false -fuse-matrix-tile-size=2 -matrix-allow-contract -force-fuse-matrix -verify-dom-info %s -S | FileCheck %s --check-prefix=PTR32
@@ -10,179 +9,6 @@
target triple = "aarch64-unknown-unknown"
define void @multiply(ptr %A, ptr %B, ptr %C) {
-; PTR128-LABEL: @multiply(
-; PTR128-NEXT: entry:
-; PTR128-NEXT: [[STORE_BEGIN:%.*]] = ptrtoint ptr [[C:%.*]] to i128
-; PTR128-NEXT: [[STORE_END:%.*]] = add nuw nsw i128 [[STORE_BEGIN]], 128
-; PTR128-NEXT: [[LOAD_BEGIN:%.*]] = ptrtoint ptr [[A:%.*]] to i128
-; PTR128-NEXT: [[TMP0:%.*]] = icmp ugt i128 [[STORE_END]], [[LOAD_BEGIN]]
-; PTR128-NEXT: br i1 [[TMP0]], label [[ALIAS_CONT:%.*]], label [[NO_ALIAS:%.*]]
-; PTR128: alias_cont:
-; PTR128-NEXT: [[LOAD_END:%.*]] = add nuw nsw i128 [[LOAD_BEGIN]], 128
-; PTR128-NEXT: [[TMP1:%.*]] = icmp ugt i128 [[LOAD_END]], [[STORE_BEGIN]]
-; PTR128-NEXT: br i1 [[TMP1]], label [[COPY:%.*]], label [[NO_ALIAS]]
-; PTR128: copy:
-; PTR128-NEXT: [[TMP2:%.*]] = alloca [16 x double], align 8
-; PTR128-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 8 dereferenceable(128) [[TMP2]], ptr noundef nonnull align 8 dereferenceable(128) [[A]], i64 128, i1 false)
-; PTR128-NEXT: br label [[NO_ALIAS]]
-; PTR128: no_alias:
-; PTR128-NEXT: [[TMP3:%.*]] = phi ptr [ [[A]], [[ENTRY:%.*]] ], [ [[A]], [[ALIAS_CONT]] ], [ [[TMP2]], [[COPY]] ]
-; PTR128-NEXT: [[STORE_BEGIN4:%.*]] = ptrtoint ptr [[C]] to i128
-; PTR128-NEXT: [[STORE_END5:%.*]] = add nuw nsw i128 [[STORE_BEGIN4]], 128
-; PTR128-NEXT: [[LOAD_BEGIN6:%.*]] = ptrtoint ptr [[A]] to i128
-; PTR128-NEXT: [[TMP4:%.*]] = icmp ugt i128 [[STORE_END5]], [[LOAD_BEGIN6]]
-; PTR128-NEXT: br i1 [[TMP4]], label [[ALIAS_CONT1:%.*]], label [[NO_ALIAS3:%.*]]
-; PTR128: alias_cont1:
-; PTR128-NEXT: [[LOAD_END7:%.*]] = add nuw nsw i128 [[LOAD_BEGIN6]], 128
-; PTR128-NEXT: [[TMP5:%.*]] = icmp ugt i128 [[LOAD_END7]], [[STORE_BEGIN4]]
-; PTR128-NEXT: br i1 [[TMP5]], label [[COPY2:%.*]], label [[NO_ALIAS3]]
-; PTR128: copy2:
-; PTR128-NEXT: [[TMP6:%.*]] = alloca [16 x double], align 8
-; PTR128-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 8 dereferenceable(128) [[TMP6]], ptr noundef nonnull align 8 dereferenceable(128) [[A]], i64 128, i1 false)
-; PTR128-NEXT: br label [[NO_ALIAS3]]
-; PTR128: no_alias3:
-; PTR128-NEXT: [[TMP7:%.*]] = phi ptr [ [[A]], [[NO_ALIAS]] ], [ [[A]], [[ALIAS_CONT1]] ], [ [[TMP6]], [[COPY2]] ]
-; PTR128-NEXT: [[COL_LOAD:%.*]] = load <2 x double>, ptr [[TMP3]], align 8
-; PTR128-NEXT: [[VEC_GEP:%.*]] = getelementptr i8, ptr [[TMP3]], i128 32
-; PTR128-NEXT: [[COL_LOAD8:%.*]] = load <2 x double>, ptr [[VEC_GEP]], align 8
-; PTR128-NEXT: [[COL_LOAD9:%.*]] = load <2 x double>, ptr [[TMP7]], align 8
-; PTR128-NEXT: [[VEC_GEP10:%.*]] = getelementptr i8, ptr [[TMP7]], i128 32
-; PTR128-NEXT: [[COL_LOAD11:%.*]] = load <2 x double>, ptr [[VEC_GEP10]], align 8
-; PTR128-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <2 x double> [[COL_LOAD9]], <2 x double> poison, <2 x i32> zeroinitializer
-; PTR128-NEXT: [[TMP8:%.*]] = fmul contract <2 x double> [[COL_LOAD]], [[SPLAT_SPLAT]]
-; PTR128-NEXT: [[SPLAT_SPLAT14:%.*]] = shufflevector <2 x double> [[COL_LOAD9]], <2 x double> poison, <2 x i32> <i32 1, i32 1>
-; PTR128-NEXT: [[TMP9:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD8]], <2 x double> [[SPLAT_SPLAT14]], <2 x double> [[TMP8]])
-; PTR128-NEXT: [[SPLAT_SPLAT17:%.*]] = shufflevector <2 x double> [[COL_LOAD11]], <2 x double> poison, <2 x i32> zeroinitializer
-; PTR128-NEXT: [[TMP10:%.*]] = fmul contract <2 x double> [[COL_LOAD]], [[SPLAT_SPLAT17]]
-; PTR128-NEXT: [[SPLAT_SPLAT20:%.*]] = shufflevector <2 x double> [[COL_LOAD11]], <2 x double> poison, <2 x i32> <i32 1, i32 1>
-; PTR128-NEXT: [[TMP11:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD8]], <2 x double> [[SPLAT_SPLAT20]], <2 x double> [[TMP10]])
-; PTR128-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[TMP3]], i128 64
-; PTR128-NEXT: [[COL_LOAD21:%.*]] = load <2 x double>, ptr [[TMP12]], align 8
-; PTR128-NEXT: [[VEC_GEP22:%.*]] = getelementptr i8, ptr [[TMP3]], i128 96
-; PTR128-NEXT: [[COL_LOAD23:%.*]] = load <2 x double>, ptr [[VEC_GEP22]], align 8
-; PTR128-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[TMP7]], i128 16
-; PTR128-NEXT: [[COL_LOAD24:%.*]] = load <2 x double>, ptr [[TMP13]], align 8
-; PTR128-NEXT: [[VEC_GEP25:%.*]] = getelementptr i8, ptr [[TMP7]], i128 48
-; PTR128-NEXT: [[COL_LOAD26:%.*]] = load <2 x double>, ptr [[VEC_GEP25]], align 8
-; PTR128-NEXT: [[SPLAT_SPLAT30:%.*]] = shufflevector <2 x double> [[COL_LOAD24]], <2 x double> poison, <2 x i32> zeroinitializer
-; PTR128-NEXT: [[TMP14:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD21]], <2 x double> [[SPLAT_SPLAT30]], <2 x double> [[TMP9]])
-; PTR128-NEXT: [[SPLAT_SPLAT33:%.*]] = shufflevector <2 x double> [[COL_LOAD24]], <2 x double> poison, <2 x i32> <i32 1, i32 1>
-; PTR128-NEXT: [[TMP15:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD23]], <2 x double> [[SPLAT_SPLAT33]], <2 x double> [[TMP14]])
-; PTR128-NEXT: [[SPLAT_SPLAT37:%.*]] = shufflevector <2 x double> [[COL_LOAD26]], <2 x double> poison, <2 x i32> zeroinitializer
-; PTR128-NEXT: [[TMP16:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD21]], <2 x double> [[SPLAT_SPLAT37]], <2 x double> [[TMP11]])
-; PTR128-NEXT: [[SPLAT_SPLAT40:%.*]] = shufflevector <2 x double> [[COL_LOAD26]], <2 x double> poison, <2 x i32> <i32 1, i32 1>
-; PTR128-NEXT: [[TMP17:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD23]], <2 x double> [[SPLAT_SPLAT40]], <2 x double> [[TMP16]])
-; PTR128-NEXT: store <2 x double> [[TMP15]], ptr [[C]], align 8
-; PTR128-NEXT: [[VEC_GEP41:%.*]] = getelementptr i8, ptr [[C]], i128 32
-; PTR128-NEXT: store <2 x double> [[TMP17]], ptr [[VEC_GEP41]], align 8
-; PTR128-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[TMP3]], i128 16
-; PTR128-NEXT: [[COL_LOAD42:%.*]] = load <2 x double>, ptr [[TMP18]], align 8
-; PTR128-NEXT: [[VEC_GEP43:%.*]] = getelementptr i8, ptr [[TMP3]], i128 48
-; PTR128-NEXT: [[COL_LOAD44:%.*]] = load <2 x double>, ptr [[VEC_GEP43]], align 8
-; PTR128-NEXT: [[COL_LOAD45:%.*]] = load <2 x double>, ptr [[TMP7]], align 8
-; PTR128-NEXT: [[VEC_GEP46:%.*]] = getelementptr i8, ptr [[TMP7]], i128 32
-; PTR128-NEXT: [[COL_LOAD47:%.*]] = load <2 x double>, ptr [[VEC_GEP46]], align 8
-; PTR128-NEXT: [[SPLAT_SPLAT50:%.*]] = shufflevector <2 x double> [[COL_LOAD45]], <2 x double> poison, <2 x i32> zeroinitializer
-; PTR128-NEXT: [[TMP19:%.*]] = fmul contract <2 x double> [[COL_LOAD42]], [[SPLAT_SPLAT50]]
-; PTR128-NEXT: [[SPLAT_SPLAT53:%.*]] = shufflevector <2 x double> [[COL_LOAD45]], <2 x double> poison, <2 x i32> <i32 1, i32 1>
-; PTR128-NEXT: [[TMP20:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD44]], <2 x double> [[SPLAT_SPLAT53]], <2 x double> [[TMP19]])
-; PTR128-NEXT: [[SPLAT_SPLAT56:%.*]] = shufflevector <2 x double> [[COL_LOAD47]], <2 x double> poison, <2 x i32> zeroinitializer
-; PTR128-NEXT: [[TMP21:%.*]] = fmul contract <2 x double> [[COL_LOAD42]], [[SPLAT_SPLAT56]]
-; PTR128-NEXT: [[SPLAT_SPLAT59:%.*]] = shufflevector <2 x double> [[COL_LOAD47]], <2 x double> poison, <2 x i32> <i32 1, i32 1>
-; PTR128-NEXT: [[TMP22:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD44]], <2 x double> [[SPLAT_SPLAT59]], <2 x double> [[TMP21]])
-; PTR128-NEXT: [[TMP23:%.*]] = getelementptr i8, ptr [[TMP3]], i128 80
-; PTR128-NEXT: [[COL_LOAD60:%.*]] = load <2 x double>, ptr [[TMP23]], align 8
-; PTR128-NEXT: [[VEC_GEP61:%.*]] = getelementptr i8, ptr [[TMP3]], i128 112
-; PTR128-NEXT: [[COL_LOAD62:%.*]] = load <2 x double>, ptr [[VEC_GEP61]], align 8
-; PTR128-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[TMP7]], i128 16
-; PTR128-NEXT: [[COL_LOAD63:%.*]] = load <2 x double>, ptr [[TMP24]], align 8
-; PTR128-NEXT: [[VEC_GEP64:%.*]] = getelementptr i8, ptr [[TMP7]], i128 48
-; PTR128-NEXT: [[COL_LOAD65:%.*]] = load <2 x double>, ptr [[VEC_GEP64]], align 8
-; PTR128-NEXT: [[SPLAT_SPLAT69:%.*]] = shufflevector <2 x double> [[COL_LOAD63]], <2 x double> poison, <2 x i32> zeroinitializer
-; PTR128-NEXT: [[TMP25:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD60]], <2 x double> [[SPLAT_SPLAT69]], <2 x double> [[TMP20]])
-; PTR128-NEXT: [[SPLAT_SPLAT72:%.*]] = shufflevector <2 x double> [[COL_LOAD63]], <2 x double> poison, <2 x i32> <i32 1, i32 1>
-; PTR128-NEXT: [[TMP26:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD62]], <2 x double> [[SPLAT_SPLAT72]], <2 x double> [[TMP25]])
-; PTR128-NEXT: [[SPLAT_SPLAT76:%.*]] = shufflevector <2 x double> [[COL_LOAD65]], <2 x double> poison, <2 x i32> zeroinitializer
-; PTR128-NEXT: [[TMP27:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD60]], <2 x double> [[SPLAT_SPLAT76]], <2 x double> [[TMP22]])
-; PTR128-NEXT: [[SPLAT_SPLAT79:%.*]] = shufflevector <2 x double> [[COL_LOAD65]], <2 x double> poison, <2 x i32> <i32 1, i32 1>
-; PTR128-NEXT: [[TMP28:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD62]], <2 x double> [[SPLAT_SPLAT79]], <2 x double> [[TMP27]])
-; PTR128-NEXT: [[TMP29:%.*]] = getelementptr i8, ptr [[C]], i128 16
-; PTR128-NEXT: store <2 x double> [[TMP26]], ptr [[TMP29]], align 8
-; PTR128-NEXT: [[VEC_GEP80:%.*]] = getelementptr i8, ptr [[C]], i128 48
-; PTR128-NEXT: store <2 x double> [[TMP28]], ptr [[VEC_GEP80]], align 8
-; PTR128-NEXT: [[COL_LOAD81:%.*]] = load <2 x double>, ptr [[TMP3]], align 8
-; PTR128-NEXT: [[VEC_GEP82:%.*]] = getelementptr i8, ptr [[TMP3]], i128 32
-; PTR128-NEXT: [[COL_LOAD83:%.*]] = load <2 x double>, ptr [[VEC_GEP82]], align 8
-; PTR128-NEXT: [[TMP30:%.*]] = getelementptr i8, ptr [[TMP7]], i128 64
-; PTR128-NEXT: [[COL_LOAD84:%.*]] = load <2 x double>, ptr [[TMP30]], align 8
-; PTR128-NEXT: [[VEC_GEP85:%.*]] = getelementptr i8, ptr [[TMP7]], i128 96
-; PTR128-NEXT: [[COL_LOAD86:%.*]] = load <2 x double>, ptr [[VEC_GEP85]], align 8
-; PTR128-NEXT: [[SPLAT_SPLAT89:%.*]] = shufflevector <2 x double> [[COL_LOAD84]], <2 x double> poison, <2 x i32> zeroinitializer
-; PTR128-NEXT: [[TMP31:%.*]] = fmul contract <2 x double> [[COL_LOAD81]], [[SPLAT_SPLAT89]]
-; PTR128-NEXT: [[SPLAT_SPLAT92:%.*]] = shufflevector <2 x double> [[COL_LOAD84]], <2 x double> poison, <2 x i32> <i32 1, i32 1>
-; PTR128-NEXT: [[TMP32:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD83]], <2 x double> [[SPLAT_SPLAT92]], <2 x double> [[TMP31]])
-; PTR128-NEXT: [[SPLAT_SPLAT95:%.*]] = shufflevector <2 x double> [[COL_LOAD86]], <2 x double> poison, <2 x i32> zeroinitializer
-; PTR128-NEXT: [[TMP33:%.*]] = fmul contract <2 x double> [[COL_LOAD81]], [[SPLAT_SPLAT95]]
-; PTR128-NEXT: [[SPLAT_SPLAT98:%.*]] = shufflevector <2 x double> [[COL_LOAD86]], <2 x double> poison, <2 x i32> <i32 1, i32 1>
-; PTR128-NEXT: [[TMP34:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD83]], <2 x double> [[SPLAT_SPLAT98]], <2 x double> [[TMP33]])
-; PTR128-NEXT: [[TMP35:%.*]] = getelementptr i8, ptr [[TMP3]], i128 64
-; PTR128-NEXT: [[COL_LOAD99:%.*]] = load <2 x double>, ptr [[TMP35]], align 8
-; PTR128-NEXT: [[VEC_GEP100:%.*]] = getelementptr i8, ptr [[TMP3]], i128 96
-; PTR128-NEXT: [[COL_LOAD101:%.*]] = load <2 x double>, ptr [[VEC_GEP100]], align 8
-; PTR128-NEXT: [[TMP36:%.*]] = getelementptr i8, ptr [[TMP7]], i128 80
-; PTR128-NEXT: [[COL_LOAD102:%.*]] = load <2 x double>, ptr [[TMP36]], align 8
-; PTR128-NEXT: [[VEC_GEP103:%.*]] = getelementptr i8, ptr [[TMP7]], i128 112
-; PTR128-NEXT: [[COL_LOAD104:%.*]] = load <2 x double>, ptr [[VEC_GEP103]], align 8
-; PTR128-NEXT: [[SPLAT_SPLAT108:%.*]] = shufflevector <2 x double> [[COL_LOAD102]], <2 x double> poison, <2 x i32> zeroinitializer
-; PTR128-NEXT: [[TMP37:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD99]], <2 x double> [[SPLAT_SPLAT108]], <2 x double> [[TMP32]])
-; PTR128-NEXT: [[SPLAT_SPLAT111:%.*]] = shufflevector <2 x double> [[COL_LOAD102]], <2 x double> poison, <2 x i32> <i32 1, i32 1>
-; PTR128-NEXT: [[TMP38:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD101]], <2 x double> [[SPLAT_SPLAT111]], <2 x double> [[TMP37]])
-; PTR128-NEXT: [[SPLAT_SPLAT115:%.*]] = shufflevector <2 x double> [[COL_LOAD104]], <2 x double> poison, <2 x i32> zeroinitializer
-; PTR128-NEXT: [[TMP39:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD99]], <2 x double> [[SPLAT_SPLAT115]], <2 x double> [[TMP34]])
-; PTR128-NEXT: [[SPLAT_SPLAT118:%.*]] = shufflevector <2 x double> [[COL_LOAD104]], <2 x double> poison, <2 x i32> <i32 1, i32 1>
-; PTR128-NEXT: [[TMP40:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD101]], <2 x double> [[SPLAT_SPLAT118]], <2 x double> [[TMP39]])
-; PTR128-NEXT: [[TMP41:%.*]] = getelementptr i8, ptr [[C]], i128 64
-; PTR128-NEXT: store <2 x double> [[TMP38]], ptr [[TMP41]], align 8
-; PTR128-NEXT: [[VEC_GEP119:%.*]] = getelementptr i8, ptr [[C]], i128 96
-; PTR128-NEXT: store <2 x double> [[TMP40]], ptr [[VEC_GEP119]], align 8
-; PTR128-NEXT: [[TMP42:%.*]] = getelementptr i8, ptr [[TMP3]], i128 16
-; PTR128-NEXT: [[COL_LOAD120:%.*]] = load <2 x double>, ptr [[TMP42]], align 8
-; PTR128-NEXT: [[VEC_GEP121:%.*]] = getelementptr i8, ptr [[TMP3]], i128 48
-; PTR128-NEXT: [[COL_LOAD122:%.*]] = load <2 x double>, ptr [[VEC_GEP121]], align 8
-; PTR128-NEXT: [[TMP43:%.*]] = getelementptr i8, ptr [[TMP7]], i128 64
-; PTR128-NEXT: [[COL_LOAD123:%.*]] = load <2 x double>, ptr [[TMP43]], align 8
-; PTR128-NEXT: [[VEC_GEP124:%.*]] = getelementptr i8, ptr [[TMP7]], i128 96
-; PTR128-NEXT: [[COL_LOAD125:%.*]] = load <2 x double>, ptr [[VEC_GEP124]], align 8
-; PTR128-NEXT: [[SPLAT_SPLAT128:%.*]] = shufflevector <2 x double> [[COL_LOAD123]], <2 x double> poison, <2 x i32> zeroinitializer
-; PTR128-NEXT: [[TMP44:%.*]] = fmul contract <2 x double> [[COL_LOAD120]], [[SPLAT_SPLAT128]]
-; PTR128-NEXT: [[SPLAT_SPLAT131:%.*]] = shufflevector <2 x double> [[COL_LOAD123]], <2 x double> poison, <2 x i32> <i32 1, i32 1>
-; PTR128-NEXT: [[TMP45:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD122]], <2 x double> [[SPLAT_SPLAT131]], <2 x double> [[TMP44]])
-; PTR128-NEXT: [[SPLAT_SPLAT134:%.*]] = shufflevector <2 x double> [[COL_LOAD125]], <2 x double> poison, <2 x i32> zeroinitializer
-; PTR128-NEXT: [[TMP46:%.*]] = fmul contract <2 x double> [[COL_LOAD120]], [[SPLAT_SPLAT134]]
-; PTR128-NEXT: [[SPLAT_SPLAT137:%.*]] = shufflevector <2 x double> [[COL_LOAD125]], <2 x double> poison, <2 x i32> <i32 1, i32 1>
-; PTR128-NEXT: [[TMP47:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD122]], <2 x double> [[SPLAT_SPLAT137]], <2 x double> [[TMP46]])
-; PTR128-NEXT: [[TMP48:%.*]] = getelementptr i8, ptr [[TMP3]], i128 80
-; PTR128-NEXT: [[COL_LOAD138:%.*]] = load <2 x double>, ptr [[TMP48]], align 8
-; PTR128-NEXT: [[VEC_GEP139:%.*]] = getelementptr i8, ptr [[TMP3]], i128 112
-; PTR128-NEXT: [[COL_LOAD140:%.*]] = load <2 x double>, ptr [[VEC_GEP139]], align 8
-; PTR128-NEXT: [[TMP49:%.*]] = getelementptr i8, ptr [[TMP7]], i128 80
-; PTR128-NEXT: [[COL_LOAD141:%.*]] = load <2 x double>, ptr [[TMP49]], align 8
-; PTR128-NEXT: [[VEC_GEP142:%.*]] = getelementptr i8, ptr [[TMP7]], i128 112
-; PTR128-NEXT: [[COL_LOAD143:%.*]] = load <2 x double>, ptr [[VEC_GEP142]], align 8
-; PTR128-NEXT: [[SPLAT_SPLAT147:%.*]] = shufflevector <2 x double> [[COL_LOAD141]], <2 x double> poison, <2 x i32> zeroinitializer
-; PTR128-NEXT: [[TMP50:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD138]], <2 x double> [[SPLAT_SPLAT147]], <2 x double> [[TMP45]])
-; PTR128-NEXT: [[SPLAT_SPLAT150:%.*]] = shufflevector <2 x double> [[COL_LOAD141]], <2 x double> poison, <2 x i32> <i32 1, i32 1>
-; PTR128-NEXT: [[TMP51:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD140]], <2 x double> [[SPLAT_SPLAT150]], <2 x double> [[TMP50]])
-; PTR128-NEXT: [[SPLAT_SPLAT154:%.*]] = shufflevector <2 x double> [[COL_LOAD143]], <2 x double> poison, <2 x i32> zeroinitializer
-; PTR128-NEXT: [[TMP52:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD138]], <2 x double> [[SPLAT_SPLAT154]], <2 x double> [[TMP47]])
-; PTR128-NEXT: [[SPLAT_SPLAT157:%.*]] = shufflevector <2 x double> [[COL_LOAD143]], <2 x double> poison, <2 x i32> <i32 1, i32 1>
-; PTR128-NEXT: [[TMP53:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD140]], <2 x double> [[SPLAT_SPLAT157]], <2 x double> [[TMP52]])
-; PTR128-NEXT: [[TMP54:%.*]] = getelementptr i8, ptr [[C]], i128 80
-; PTR128-NEXT: store <2 x double> [[TMP51]], ptr [[TMP54]], align 8
-; PTR128-NEXT: [[VEC_GEP158:%.*]] = getelementptr i8, ptr [[C]], i128 112
-; PTR128-NEXT: store <2 x double> [[TMP53]], ptr [[VEC_GEP158]], align 8
-; PTR128-NEXT: ret void
-;
; PTR64-LABEL: @multiply(
; PTR64-NEXT: entry:
; PTR64-NEXT: [[STORE_BEGIN:%.*]] = ptrtoint ptr [[C:%.*]] to i64
diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/data-layout.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/data-layout.ll
index 87def6b..3d05014 100644
--- a/llvm/test/Transforms/LowerMatrixIntrinsics/data-layout.ll
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/data-layout.ll
@@ -1,5 +1,4 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -passes='lower-matrix-intrinsics' -data-layout='p:128:128' -S < %s | FileCheck %s --check-prefix=PTR128
; RUN: opt -passes='lower-matrix-intrinsics' -data-layout='p:64:64' -S < %s | FileCheck %s --check-prefix=PTR64
; RUN: opt -passes='lower-matrix-intrinsics' -data-layout='p:32:32' -S < %s | FileCheck %s --check-prefix=PTR32
@@ -7,128 +6,13 @@
; the need to emit `libc` calls), we perform strided index calculations using
; the same pointer bit-width as the matrix pointers, as determined by the data
; layout. To verify this behaviour, this test runs several strided loads and
-; stores through the lowering pass with (32|64|128)-bit pointers, and verifies
-; the generated code extends / truncates strides accordingly. Similarly,
+; stores through the lowering pass with (32|64)-bit pointers, and verifies the
+; generated code extends / truncates strides accordingly. Similarly,
; `data-layout-multiply-fused.ll` adopts this approach to verify the same
; behaviour for index calculations emitted while lowering fused matrix
; multiplies.
-define <9 x double> @strided_load_3x3_i128(ptr %in, i128 %stride) {
-; PTR128-LABEL: @strided_load_3x3_i128(
-; PTR128-NEXT: entry:
-; PTR128-NEXT: [[VEC_START:%.*]] = mul i128 0, [[STRIDE:%.*]]
-; PTR128-NEXT: [[VEC_GEP:%.*]] = getelementptr double, ptr [[IN:%.*]], i128 [[VEC_START]]
-; PTR128-NEXT: [[COL_LOAD:%.*]] = load <3 x double>, ptr [[VEC_GEP]], align 8
-; PTR128-NEXT: [[VEC_START1:%.*]] = mul i128 1, [[STRIDE]]
-; PTR128-NEXT: [[VEC_GEP2:%.*]] = getelementptr double, ptr [[IN]], i128 [[VEC_START1]]
-; PTR128-NEXT: [[COL_LOAD3:%.*]] = load <3 x double>, ptr [[VEC_GEP2]], align 8
-; PTR128-NEXT: [[VEC_START4:%.*]] = mul i128 2, [[STRIDE]]
-; PTR128-NEXT: [[VEC_GEP5:%.*]] = getelementptr double, ptr [[IN]], i128 [[VEC_START4]]
-; PTR128-NEXT: [[COL_LOAD6:%.*]] = load <3 x double>, ptr [[VEC_GEP5]], align 8
-; PTR128-NEXT: [[TMP0:%.*]] = shufflevector <3 x double> [[COL_LOAD]], <3 x double> [[COL_LOAD3]], <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
-; PTR128-NEXT: [[TMP1:%.*]] = shufflevector <3 x double> [[COL_LOAD6]], <3 x double> poison, <6 x i32> <i32 0, i32 1, i32 2, i32 poison, i32 poison, i32 poison>
-; PTR128-NEXT: [[TMP2:%.*]] = shufflevector <6 x double> [[TMP0]], <6 x double> [[TMP1]], <9 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
-; PTR128-NEXT: ret <9 x double> [[TMP2]]
-;
-; PTR64-LABEL: @strided_load_3x3_i128(
-; PTR64-NEXT: entry:
-; PTR64-NEXT: [[STRIDE_CAST:%.*]] = trunc i128 [[STRIDE:%.*]] to i64
-; PTR64-NEXT: [[VEC_START:%.*]] = mul i64 0, [[STRIDE_CAST]]
-; PTR64-NEXT: [[VEC_GEP:%.*]] = getelementptr double, ptr [[IN:%.*]], i64 [[VEC_START]]
-; PTR64-NEXT: [[COL_LOAD:%.*]] = load <3 x double>, ptr [[VEC_GEP]], align 8
-; PTR64-NEXT: [[VEC_START1:%.*]] = mul i64 1, [[STRIDE_CAST]]
-; PTR64-NEXT: [[VEC_GEP2:%.*]] = getelementptr double, ptr [[IN]], i64 [[VEC_START1]]
-; PTR64-NEXT: [[COL_LOAD3:%.*]] = load <3 x double>, ptr [[VEC_GEP2]], align 8
-; PTR64-NEXT: [[VEC_START4:%.*]] = mul i64 2, [[STRIDE_CAST]]
-; PTR64-NEXT: [[VEC_GEP5:%.*]] = getelementptr double, ptr [[IN]], i64 [[VEC_START4]]
-; PTR64-NEXT: [[COL_LOAD6:%.*]] = load <3 x double>, ptr [[VEC_GEP5]], align 8
-; PTR64-NEXT: [[TMP0:%.*]] = shufflevector <3 x double> [[COL_LOAD]], <3 x double> [[COL_LOAD3]], <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
-; PTR64-NEXT: [[TMP1:%.*]] = shufflevector <3 x double> [[COL_LOAD6]], <3 x double> poison, <6 x i32> <i32 0, i32 1, i32 2, i32 poison, i32 poison, i32 poison>
-; PTR64-NEXT: [[TMP2:%.*]] = shufflevector <6 x double> [[TMP0]], <6 x double> [[TMP1]], <9 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
-; PTR64-NEXT: ret <9 x double> [[TMP2]]
-;
-; PTR32-LABEL: @strided_load_3x3_i128(
-; PTR32-NEXT: entry:
-; PTR32-NEXT: [[STRIDE_CAST:%.*]] = trunc i128 [[STRIDE:%.*]] to i32
-; PTR32-NEXT: [[VEC_START:%.*]] = mul i32 0, [[STRIDE_CAST]]
-; PTR32-NEXT: [[VEC_GEP:%.*]] = getelementptr double, ptr [[IN:%.*]], i32 [[VEC_START]]
-; PTR32-NEXT: [[COL_LOAD:%.*]] = load <3 x double>, ptr [[VEC_GEP]], align 8
-; PTR32-NEXT: [[VEC_START1:%.*]] = mul i32 1, [[STRIDE_CAST]]
-; PTR32-NEXT: [[VEC_GEP2:%.*]] = getelementptr double, ptr [[IN]], i32 [[VEC_START1]]
-; PTR32-NEXT: [[COL_LOAD3:%.*]] = load <3 x double>, ptr [[VEC_GEP2]], align 8
-; PTR32-NEXT: [[VEC_START4:%.*]] = mul i32 2, [[STRIDE_CAST]]
-; PTR32-NEXT: [[VEC_GEP5:%.*]] = getelementptr double, ptr [[IN]], i32 [[VEC_START4]]
-; PTR32-NEXT: [[COL_LOAD6:%.*]] = load <3 x double>, ptr [[VEC_GEP5]], align 8
-; PTR32-NEXT: [[TMP0:%.*]] = shufflevector <3 x double> [[COL_LOAD]], <3 x double> [[COL_LOAD3]], <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
-; PTR32-NEXT: [[TMP1:%.*]] = shufflevector <3 x double> [[COL_LOAD6]], <3 x double> poison, <6 x i32> <i32 0, i32 1, i32 2, i32 poison, i32 poison, i32 poison>
-; PTR32-NEXT: [[TMP2:%.*]] = shufflevector <6 x double> [[TMP0]], <6 x double> [[TMP1]], <9 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
-; PTR32-NEXT: ret <9 x double> [[TMP2]]
-;
-entry:
- %load = call <9 x double> @llvm.matrix.column.major.load.v9f64.i128(ptr %in, i128 %stride, i1 false, i32 3, i32 3)
- ret <9 x double> %load
-}
-
-define <9 x double> @strided_load_3x3_const_stride_i128(ptr %in) {
-; PTR128-LABEL: @strided_load_3x3_const_stride_i128(
-; PTR128-NEXT: entry:
-; PTR128-NEXT: [[COL_LOAD:%.*]] = load <3 x double>, ptr [[IN:%.*]], align 8
-; PTR128-NEXT: [[VEC_GEP:%.*]] = getelementptr double, ptr [[IN]], i128 16
-; PTR128-NEXT: [[COL_LOAD1:%.*]] = load <3 x double>, ptr [[VEC_GEP]], align 8
-; PTR128-NEXT: [[VEC_GEP2:%.*]] = getelementptr double, ptr [[IN]], i128 32
-; PTR128-NEXT: [[COL_LOAD3:%.*]] = load <3 x double>, ptr [[VEC_GEP2]], align 8
-; PTR128-NEXT: [[TMP0:%.*]] = shufflevector <3 x double> [[COL_LOAD]], <3 x double> [[COL_LOAD1]], <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
-; PTR128-NEXT: [[TMP1:%.*]] = shufflevector <3 x double> [[COL_LOAD3]], <3 x double> poison, <6 x i32> <i32 0, i32 1, i32 2, i32 poison, i32 poison, i32 poison>
-; PTR128-NEXT: [[TMP2:%.*]] = shufflevector <6 x double> [[TMP0]], <6 x double> [[TMP1]], <9 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
-; PTR128-NEXT: ret <9 x double> [[TMP2]]
-;
-; PTR64-LABEL: @strided_load_3x3_const_stride_i128(
-; PTR64-NEXT: entry:
-; PTR64-NEXT: [[COL_LOAD:%.*]] = load <3 x double>, ptr [[IN:%.*]], align 8
-; PTR64-NEXT: [[VEC_GEP:%.*]] = getelementptr double, ptr [[IN]], i64 16
-; PTR64-NEXT: [[COL_LOAD1:%.*]] = load <3 x double>, ptr [[VEC_GEP]], align 8
-; PTR64-NEXT: [[VEC_GEP2:%.*]] = getelementptr double, ptr [[IN]], i64 32
-; PTR64-NEXT: [[COL_LOAD3:%.*]] = load <3 x double>, ptr [[VEC_GEP2]], align 8
-; PTR64-NEXT: [[TMP0:%.*]] = shufflevector <3 x double> [[COL_LOAD]], <3 x double> [[COL_LOAD1]], <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
-; PTR64-NEXT: [[TMP1:%.*]] = shufflevector <3 x double> [[COL_LOAD3]], <3 x double> poison, <6 x i32> <i32 0, i32 1, i32 2, i32 poison, i32 poison, i32 poison>
-; PTR64-NEXT: [[TMP2:%.*]] = shufflevector <6 x double> [[TMP0]], <6 x double> [[TMP1]], <9 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
-; PTR64-NEXT: ret <9 x double> [[TMP2]]
-;
-; PTR32-LABEL: @strided_load_3x3_const_stride_i128(
-; PTR32-NEXT: entry:
-; PTR32-NEXT: [[COL_LOAD:%.*]] = load <3 x double>, ptr [[IN:%.*]], align 8
-; PTR32-NEXT: [[VEC_GEP:%.*]] = getelementptr double, ptr [[IN]], i32 16
-; PTR32-NEXT: [[COL_LOAD1:%.*]] = load <3 x double>, ptr [[VEC_GEP]], align 8
-; PTR32-NEXT: [[VEC_GEP2:%.*]] = getelementptr double, ptr [[IN]], i32 32
-; PTR32-NEXT: [[COL_LOAD3:%.*]] = load <3 x double>, ptr [[VEC_GEP2]], align 8
-; PTR32-NEXT: [[TMP0:%.*]] = shufflevector <3 x double> [[COL_LOAD]], <3 x double> [[COL_LOAD1]], <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
-; PTR32-NEXT: [[TMP1:%.*]] = shufflevector <3 x double> [[COL_LOAD3]], <3 x double> poison, <6 x i32> <i32 0, i32 1, i32 2, i32 poison, i32 poison, i32 poison>
-; PTR32-NEXT: [[TMP2:%.*]] = shufflevector <6 x double> [[TMP0]], <6 x double> [[TMP1]], <9 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
-; PTR32-NEXT: ret <9 x double> [[TMP2]]
-;
-entry:
- %load = call <9 x double> @llvm.matrix.column.major.load.v9f64.i128(ptr %in, i128 16, i1 false, i32 3, i32 3)
- ret <9 x double> %load
-}
-
define <9 x double> @strided_load_3x3_i64(ptr %in, i64 %stride) {
-; PTR128-LABEL: @strided_load_3x3_i64(
-; PTR128-NEXT: entry:
-; PTR128-NEXT: [[STRIDE_CAST:%.*]] = zext i64 [[STRIDE:%.*]] to i128
-; PTR128-NEXT: [[VEC_START:%.*]] = mul i128 0, [[STRIDE_CAST]]
-; PTR128-NEXT: [[VEC_GEP:%.*]] = getelementptr double, ptr [[IN:%.*]], i128 [[VEC_START]]
-; PTR128-NEXT: [[COL_LOAD:%.*]] = load <3 x double>, ptr [[VEC_GEP]], align 8
-; PTR128-NEXT: [[VEC_START1:%.*]] = mul i128 1, [[STRIDE_CAST]]
-; PTR128-NEXT: [[VEC_GEP2:%.*]] = getelementptr double, ptr [[IN]], i128 [[VEC_START1]]
-; PTR128-NEXT: [[COL_LOAD3:%.*]] = load <3 x double>, ptr [[VEC_GEP2]], align 8
-; PTR128-NEXT: [[VEC_START4:%.*]] = mul i128 2, [[STRIDE_CAST]]
-; PTR128-NEXT: [[VEC_GEP5:%.*]] = getelementptr double, ptr [[IN]], i128 [[VEC_START4]]
-; PTR128-NEXT: [[COL_LOAD6:%.*]] = load <3 x double>, ptr [[VEC_GEP5]], align 8
-; PTR128-NEXT: [[TMP0:%.*]] = shufflevector <3 x double> [[COL_LOAD]], <3 x double> [[COL_LOAD3]], <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
-; PTR128-NEXT: [[TMP1:%.*]] = shufflevector <3 x double> [[COL_LOAD6]], <3 x double> poison, <6 x i32> <i32 0, i32 1, i32 2, i32 poison, i32 poison, i32 poison>
-; PTR128-NEXT: [[TMP2:%.*]] = shufflevector <6 x double> [[TMP0]], <6 x double> [[TMP1]], <9 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
-; PTR128-NEXT: ret <9 x double> [[TMP2]]
-;
; PTR64-LABEL: @strided_load_3x3_i64(
; PTR64-NEXT: entry:
; PTR64-NEXT: [[VEC_START:%.*]] = mul i64 0, [[STRIDE:%.*]]
@@ -168,18 +52,6 @@ entry:
}
define <9 x double> @strided_load_3x3_const_stride_i64(ptr %in) {
-; PTR128-LABEL: @strided_load_3x3_const_stride_i64(
-; PTR128-NEXT: entry:
-; PTR128-NEXT: [[COL_LOAD:%.*]] = load <3 x double>, ptr [[IN:%.*]], align 8
-; PTR128-NEXT: [[VEC_GEP:%.*]] = getelementptr double, ptr [[IN]], i128 16
-; PTR128-NEXT: [[COL_LOAD1:%.*]] = load <3 x double>, ptr [[VEC_GEP]], align 8
-; PTR128-NEXT: [[VEC_GEP2:%.*]] = getelementptr double, ptr [[IN]], i128 32
-; PTR128-NEXT: [[COL_LOAD3:%.*]] = load <3 x double>, ptr [[VEC_GEP2]], align 8
-; PTR128-NEXT: [[TMP0:%.*]] = shufflevector <3 x double> [[COL_LOAD]], <3 x double> [[COL_LOAD1]], <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
-; PTR128-NEXT: [[TMP1:%.*]] = shufflevector <3 x double> [[COL_LOAD3]], <3 x double> poison, <6 x i32> <i32 0, i32 1, i32 2, i32 poison, i32 poison, i32 poison>
-; PTR128-NEXT: [[TMP2:%.*]] = shufflevector <6 x double> [[TMP0]], <6 x double> [[TMP1]], <9 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
-; PTR128-NEXT: ret <9 x double> [[TMP2]]
-;
; PTR64-LABEL: @strided_load_3x3_const_stride_i64(
; PTR64-NEXT: entry:
; PTR64-NEXT: [[COL_LOAD:%.*]] = load <3 x double>, ptr [[IN:%.*]], align 8
@@ -210,23 +82,6 @@ entry:
}
define <9 x double> @strided_load_3x3_i32(ptr %in, i32 %stride) {
-; PTR128-LABEL: @strided_load_3x3_i32(
-; PTR128-NEXT: entry:
-; PTR128-NEXT: [[STRIDE_CAST:%.*]] = zext i32 [[STRIDE:%.*]] to i128
-; PTR128-NEXT: [[VEC_START:%.*]] = mul i128 0, [[STRIDE_CAST]]
-; PTR128-NEXT: [[VEC_GEP:%.*]] = getelementptr double, ptr [[IN:%.*]], i128 [[VEC_START]]
-; PTR128-NEXT: [[COL_LOAD:%.*]] = load <3 x double>, ptr [[VEC_GEP]], align 8
-; PTR128-NEXT: [[VEC_START1:%.*]] = mul i128 1, [[STRIDE_CAST]]
-; PTR128-NEXT: [[VEC_GEP2:%.*]] = getelementptr double, ptr [[IN]], i128 [[VEC_START1]]
-; PTR128-NEXT: [[COL_LOAD3:%.*]] = load <3 x double>, ptr [[VEC_GEP2]], align 8
-; PTR128-NEXT: [[VEC_START4:%.*]] = mul i128 2, [[STRIDE_CAST]]
-; PTR128-NEXT: [[VEC_GEP5:%.*]] = getelementptr double, ptr [[IN]], i128 [[VEC_START4]]
-; PTR128-NEXT: [[COL_LOAD6:%.*]] = load <3 x double>, ptr [[VEC_GEP5]], align 8
-; PTR128-NEXT: [[TMP0:%.*]] = shufflevector <3 x double> [[COL_LOAD]], <3 x double> [[COL_LOAD3]], <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
-; PTR128-NEXT: [[TMP1:%.*]] = shufflevector <3 x double> [[COL_LOAD6]], <3 x double> poison, <6 x i32> <i32 0, i32 1, i32 2, i32 poison, i32 poison, i32 poison>
-; PTR128-NEXT: [[TMP2:%.*]] = shufflevector <6 x double> [[TMP0]], <6 x double> [[TMP1]], <9 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
-; PTR128-NEXT: ret <9 x double> [[TMP2]]
-;
; PTR64-LABEL: @strided_load_3x3_i32(
; PTR64-NEXT: entry:
; PTR64-NEXT: [[STRIDE_CAST:%.*]] = zext i32 [[STRIDE:%.*]] to i64
@@ -266,18 +121,6 @@ entry:
}
define <9 x double> @strided_load_3x3_const_stride_i32(ptr %in) {
-; PTR128-LABEL: @strided_load_3x3_const_stride_i32(
-; PTR128-NEXT: entry:
-; PTR128-NEXT: [[COL_LOAD:%.*]] = load <3 x double>, ptr [[IN:%.*]], align 8
-; PTR128-NEXT: [[VEC_GEP:%.*]] = getelementptr double, ptr [[IN]], i128 16
-; PTR128-NEXT: [[COL_LOAD1:%.*]] = load <3 x double>, ptr [[VEC_GEP]], align 8
-; PTR128-NEXT: [[VEC_GEP2:%.*]] = getelementptr double, ptr [[IN]], i128 32
-; PTR128-NEXT: [[COL_LOAD3:%.*]] = load <3 x double>, ptr [[VEC_GEP2]], align 8
-; PTR128-NEXT: [[TMP0:%.*]] = shufflevector <3 x double> [[COL_LOAD]], <3 x double> [[COL_LOAD1]], <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
-; PTR128-NEXT: [[TMP1:%.*]] = shufflevector <3 x double> [[COL_LOAD3]], <3 x double> poison, <6 x i32> <i32 0, i32 1, i32 2, i32 poison, i32 poison, i32 poison>
-; PTR128-NEXT: [[TMP2:%.*]] = shufflevector <6 x double> [[TMP0]], <6 x double> [[TMP1]], <9 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
-; PTR128-NEXT: ret <9 x double> [[TMP2]]
-;
; PTR64-LABEL: @strided_load_3x3_const_stride_i32(
; PTR64-NEXT: entry:
; PTR64-NEXT: [[COL_LOAD:%.*]] = load <3 x double>, ptr [[IN:%.*]], align 8
@@ -307,6 +150,5 @@ entry:
ret <9 x double> %load
}
-declare <9 x double> @llvm.matrix.column.major.load.v9f64.i128(ptr, i128, i1, i32, i32)
declare <9 x double> @llvm.matrix.column.major.load.v9f64.i64(ptr, i64, i1, i32, i32)
declare <9 x double> @llvm.matrix.column.major.load.v9f64.i32(ptr, i32, i1, i32, i32)
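; To make the behaviour described in the comment at the top of this test concrete, the essence of the
; remaining PTR64/PTR32 checks is that the stride is first cast to the pointer width of the data layout
; before it feeds the index calculation. A minimal sketch, assuming a `p:64:64` layout and hypothetical
; value names not taken from the test:
;   %stride.cast = zext i32 %stride to i64            ; narrower strides are zero-extended to the pointer width
;   %vec.start   = mul i64 1, %stride.cast
;   %vec.gep     = getelementptr double, ptr %in, i64 %vec.start
;   %stride.trunc = trunc i128 %stride.wide to i64    ; wider strides are truncated to it instead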
diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-remainder-rm.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-remainder-rm.ll
new file mode 100644
index 0000000..4ec5898
--- /dev/null
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-remainder-rm.ll
@@ -0,0 +1,96 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes='lower-matrix-intrinsics' -matrix-default-layout=row-major -S < %s | FileCheck --check-prefix=SPLIT_REMAINDER %s
+; RUN: opt -passes='lower-matrix-intrinsics' -matrix-split-matmul-remainder-over-threshold=96 -matrix-default-layout=row-major -S < %s | FileCheck --check-prefix=NO_SPLIT_REMAINDER %s
+; RUN: opt -passes='lower-matrix-intrinsics' -matrix-split-matmul-remainder-over-threshold=64 -matrix-default-layout=row-major -S < %s | FileCheck --check-prefix=SPLIT_REMAINDER %s
+
+; REQUIRES: aarch64-registered-target
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:8:32:64-S128"
+target triple = "aarch64-apple-ios"
+
+define void @matmul(ptr %a, ptr %b, ptr %c) {
+; SPLIT_REMAINDER-LABEL: define void @matmul(
+; SPLIT_REMAINDER-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) {
+; SPLIT_REMAINDER-NEXT: [[COL_LOAD:%.*]] = load <3 x float>, ptr [[A]], align 4
+; SPLIT_REMAINDER-NEXT: [[COL_LOAD1:%.*]] = load <3 x float>, ptr [[B]], align 4
+; SPLIT_REMAINDER-NEXT: [[VEC_GEP:%.*]] = getelementptr float, ptr [[B]], i64 3
+; SPLIT_REMAINDER-NEXT: [[COL_LOAD2:%.*]] = load <3 x float>, ptr [[VEC_GEP]], align 4
+; SPLIT_REMAINDER-NEXT: [[VEC_GEP3:%.*]] = getelementptr float, ptr [[B]], i64 6
+; SPLIT_REMAINDER-NEXT: [[COL_LOAD4:%.*]] = load <3 x float>, ptr [[VEC_GEP3]], align 4
+; SPLIT_REMAINDER-NEXT: [[BLOCK:%.*]] = shufflevector <3 x float> [[COL_LOAD1]], <3 x float> poison, <2 x i32> <i32 0, i32 1>
+; SPLIT_REMAINDER-NEXT: [[TMP1:%.*]] = extractelement <3 x float> [[COL_LOAD]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <2 x float> poison, float [[TMP1]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <2 x float> [[SPLAT_SPLATINSERT]], <2 x float> poison, <2 x i32> zeroinitializer
+; SPLIT_REMAINDER-NEXT: [[TMP2:%.*]] = fmul <2 x float> [[SPLAT_SPLAT]], [[BLOCK]]
+; SPLIT_REMAINDER-NEXT: [[BLOCK5:%.*]] = shufflevector <3 x float> [[COL_LOAD2]], <3 x float> poison, <2 x i32> <i32 0, i32 1>
+; SPLIT_REMAINDER-NEXT: [[TMP3:%.*]] = extractelement <3 x float> [[COL_LOAD]], i64 1
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT6:%.*]] = insertelement <2 x float> poison, float [[TMP3]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT7:%.*]] = shufflevector <2 x float> [[SPLAT_SPLATINSERT6]], <2 x float> poison, <2 x i32> zeroinitializer
+; SPLIT_REMAINDER-NEXT: [[TMP4:%.*]] = fmul <2 x float> [[SPLAT_SPLAT7]], [[BLOCK5]]
+; SPLIT_REMAINDER-NEXT: [[TMP5:%.*]] = fadd <2 x float> [[TMP2]], [[TMP4]]
+; SPLIT_REMAINDER-NEXT: [[BLOCK8:%.*]] = shufflevector <3 x float> [[COL_LOAD4]], <3 x float> poison, <2 x i32> <i32 0, i32 1>
+; SPLIT_REMAINDER-NEXT: [[TMP6:%.*]] = extractelement <3 x float> [[COL_LOAD]], i64 2
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT9:%.*]] = insertelement <2 x float> poison, float [[TMP6]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT10:%.*]] = shufflevector <2 x float> [[SPLAT_SPLATINSERT9]], <2 x float> poison, <2 x i32> zeroinitializer
+; SPLIT_REMAINDER-NEXT: [[TMP7:%.*]] = fmul <2 x float> [[SPLAT_SPLAT10]], [[BLOCK8]]
+; SPLIT_REMAINDER-NEXT: [[TMP8:%.*]] = fadd <2 x float> [[TMP5]], [[TMP7]]
+; SPLIT_REMAINDER-NEXT: [[TMP9:%.*]] = shufflevector <2 x float> [[TMP8]], <2 x float> poison, <3 x i32> <i32 0, i32 1, i32 poison>
+; SPLIT_REMAINDER-NEXT: [[TMP10:%.*]] = shufflevector <3 x float> poison, <3 x float> [[TMP9]], <3 x i32> <i32 3, i32 4, i32 2>
+; SPLIT_REMAINDER-NEXT: [[BLOCK11:%.*]] = shufflevector <3 x float> [[COL_LOAD1]], <3 x float> poison, <1 x i32> <i32 2>
+; SPLIT_REMAINDER-NEXT: [[TMP11:%.*]] = extractelement <3 x float> [[COL_LOAD]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT12:%.*]] = insertelement <1 x float> poison, float [[TMP11]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT13:%.*]] = shufflevector <1 x float> [[SPLAT_SPLATINSERT12]], <1 x float> poison, <1 x i32> zeroinitializer
+; SPLIT_REMAINDER-NEXT: [[TMP12:%.*]] = fmul <1 x float> [[SPLAT_SPLAT13]], [[BLOCK11]]
+; SPLIT_REMAINDER-NEXT: [[BLOCK14:%.*]] = shufflevector <3 x float> [[COL_LOAD2]], <3 x float> poison, <1 x i32> <i32 2>
+; SPLIT_REMAINDER-NEXT: [[TMP13:%.*]] = extractelement <3 x float> [[COL_LOAD]], i64 1
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT15:%.*]] = insertelement <1 x float> poison, float [[TMP13]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT16:%.*]] = shufflevector <1 x float> [[SPLAT_SPLATINSERT15]], <1 x float> poison, <1 x i32> zeroinitializer
+; SPLIT_REMAINDER-NEXT: [[TMP14:%.*]] = fmul <1 x float> [[SPLAT_SPLAT16]], [[BLOCK14]]
+; SPLIT_REMAINDER-NEXT: [[TMP15:%.*]] = fadd <1 x float> [[TMP12]], [[TMP14]]
+; SPLIT_REMAINDER-NEXT: [[BLOCK17:%.*]] = shufflevector <3 x float> [[COL_LOAD4]], <3 x float> poison, <1 x i32> <i32 2>
+; SPLIT_REMAINDER-NEXT: [[TMP16:%.*]] = extractelement <3 x float> [[COL_LOAD]], i64 2
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT18:%.*]] = insertelement <1 x float> poison, float [[TMP16]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT19:%.*]] = shufflevector <1 x float> [[SPLAT_SPLATINSERT18]], <1 x float> poison, <1 x i32> zeroinitializer
+; SPLIT_REMAINDER-NEXT: [[TMP17:%.*]] = fmul <1 x float> [[SPLAT_SPLAT19]], [[BLOCK17]]
+; SPLIT_REMAINDER-NEXT: [[TMP18:%.*]] = fadd <1 x float> [[TMP15]], [[TMP17]]
+; SPLIT_REMAINDER-NEXT: [[TMP19:%.*]] = shufflevector <1 x float> [[TMP18]], <1 x float> poison, <3 x i32> <i32 0, i32 poison, i32 poison>
+; SPLIT_REMAINDER-NEXT: [[TMP20:%.*]] = shufflevector <3 x float> [[TMP10]], <3 x float> [[TMP19]], <3 x i32> <i32 0, i32 1, i32 3>
+; SPLIT_REMAINDER-NEXT: store <3 x float> [[TMP20]], ptr [[C]], align 4
+; SPLIT_REMAINDER-NEXT: ret void
+;
+; NO_SPLIT_REMAINDER-LABEL: define void @matmul(
+; NO_SPLIT_REMAINDER-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) {
+; NO_SPLIT_REMAINDER-NEXT: [[COL_LOAD:%.*]] = load <3 x float>, ptr [[A]], align 4
+; NO_SPLIT_REMAINDER-NEXT: [[COL_LOAD1:%.*]] = load <3 x float>, ptr [[B]], align 4
+; NO_SPLIT_REMAINDER-NEXT: [[VEC_GEP:%.*]] = getelementptr float, ptr [[B]], i64 3
+; NO_SPLIT_REMAINDER-NEXT: [[COL_LOAD2:%.*]] = load <3 x float>, ptr [[VEC_GEP]], align 4
+; NO_SPLIT_REMAINDER-NEXT: [[VEC_GEP3:%.*]] = getelementptr float, ptr [[B]], i64 6
+; NO_SPLIT_REMAINDER-NEXT: [[COL_LOAD4:%.*]] = load <3 x float>, ptr [[VEC_GEP3]], align 4
+; NO_SPLIT_REMAINDER-NEXT: [[BLOCK:%.*]] = shufflevector <3 x float> [[COL_LOAD1]], <3 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; NO_SPLIT_REMAINDER-NEXT: [[TMP1:%.*]] = extractelement <3 x float> [[COL_LOAD]], i64 0
+; NO_SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <3 x float> poison, float [[TMP1]], i64 0
+; NO_SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <3 x float> [[SPLAT_SPLATINSERT]], <3 x float> poison, <3 x i32> zeroinitializer
+; NO_SPLIT_REMAINDER-NEXT: [[TMP2:%.*]] = fmul <3 x float> [[SPLAT_SPLAT]], [[BLOCK]]
+; NO_SPLIT_REMAINDER-NEXT: [[BLOCK5:%.*]] = shufflevector <3 x float> [[COL_LOAD2]], <3 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; NO_SPLIT_REMAINDER-NEXT: [[TMP3:%.*]] = extractelement <3 x float> [[COL_LOAD]], i64 1
+; NO_SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT6:%.*]] = insertelement <3 x float> poison, float [[TMP3]], i64 0
+; NO_SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT7:%.*]] = shufflevector <3 x float> [[SPLAT_SPLATINSERT6]], <3 x float> poison, <3 x i32> zeroinitializer
+; NO_SPLIT_REMAINDER-NEXT: [[TMP4:%.*]] = fmul <3 x float> [[SPLAT_SPLAT7]], [[BLOCK5]]
+; NO_SPLIT_REMAINDER-NEXT: [[TMP5:%.*]] = fadd <3 x float> [[TMP2]], [[TMP4]]
+; NO_SPLIT_REMAINDER-NEXT: [[BLOCK8:%.*]] = shufflevector <3 x float> [[COL_LOAD4]], <3 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; NO_SPLIT_REMAINDER-NEXT: [[TMP6:%.*]] = extractelement <3 x float> [[COL_LOAD]], i64 2
+; NO_SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT9:%.*]] = insertelement <3 x float> poison, float [[TMP6]], i64 0
+; NO_SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT10:%.*]] = shufflevector <3 x float> [[SPLAT_SPLATINSERT9]], <3 x float> poison, <3 x i32> zeroinitializer
+; NO_SPLIT_REMAINDER-NEXT: [[TMP7:%.*]] = fmul <3 x float> [[SPLAT_SPLAT10]], [[BLOCK8]]
+; NO_SPLIT_REMAINDER-NEXT: [[TMP8:%.*]] = fadd <3 x float> [[TMP5]], [[TMP7]]
+; NO_SPLIT_REMAINDER-NEXT: [[TMP9:%.*]] = shufflevector <3 x float> [[TMP8]], <3 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; NO_SPLIT_REMAINDER-NEXT: [[TMP10:%.*]] = shufflevector <3 x float> poison, <3 x float> [[TMP9]], <3 x i32> <i32 3, i32 4, i32 5>
+; NO_SPLIT_REMAINDER-NEXT: store <3 x float> [[TMP10]], ptr [[C]], align 4
+; NO_SPLIT_REMAINDER-NEXT: ret void
+;
+ %a_load = load <3 x float>, ptr %a, align 4
+ %b_load = load <9 x float>, ptr %b, align 4
+ %matmul = tail call <3 x float> @llvm.matrix.multiply.v3f32.v9f32.v3f32(<3 x float> %a_load, <9 x float> %b_load, i32 1, i32 3, i32 3)
+ store <3 x float> %matmul, ptr %c, align 4
+ ret void
+}
diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-remainder.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-remainder.ll
new file mode 100644
index 0000000..fbc2cbc
--- /dev/null
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-remainder.ll
@@ -0,0 +1,96 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes='lower-matrix-intrinsics' -S < %s | FileCheck --check-prefix=SPLIT_REMAINDER %s
+; RUN: opt -passes='lower-matrix-intrinsics' -matrix-split-matmul-remainder-over-threshold=96 -S < %s | FileCheck --check-prefix=NO_SPLIT_REMAINDER %s
+; RUN: opt -passes='lower-matrix-intrinsics' -matrix-split-matmul-remainder-over-threshold=64 -S < %s | FileCheck --check-prefix=SPLIT_REMAINDER %s
+
+; REQUIRES: aarch64-registered-target
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:8:32:64-S128"
+target triple = "aarch64-apple-ios"
+
+define void @matmul(ptr %a, ptr %b, ptr %c) {
+; SPLIT_REMAINDER-LABEL: define void @matmul(
+; SPLIT_REMAINDER-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) {
+; SPLIT_REMAINDER-NEXT: [[COL_LOAD:%.*]] = load <3 x float>, ptr [[A]], align 4
+; SPLIT_REMAINDER-NEXT: [[VEC_GEP:%.*]] = getelementptr float, ptr [[A]], i64 3
+; SPLIT_REMAINDER-NEXT: [[COL_LOAD1:%.*]] = load <3 x float>, ptr [[VEC_GEP]], align 4
+; SPLIT_REMAINDER-NEXT: [[VEC_GEP2:%.*]] = getelementptr float, ptr [[A]], i64 6
+; SPLIT_REMAINDER-NEXT: [[COL_LOAD3:%.*]] = load <3 x float>, ptr [[VEC_GEP2]], align 4
+; SPLIT_REMAINDER-NEXT: [[COL_LOAD4:%.*]] = load <3 x float>, ptr [[B]], align 4
+; SPLIT_REMAINDER-NEXT: [[BLOCK:%.*]] = shufflevector <3 x float> [[COL_LOAD]], <3 x float> poison, <2 x i32> <i32 0, i32 1>
+; SPLIT_REMAINDER-NEXT: [[TMP1:%.*]] = extractelement <3 x float> [[COL_LOAD4]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <2 x float> poison, float [[TMP1]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <2 x float> [[SPLAT_SPLATINSERT]], <2 x float> poison, <2 x i32> zeroinitializer
+; SPLIT_REMAINDER-NEXT: [[TMP2:%.*]] = fmul <2 x float> [[BLOCK]], [[SPLAT_SPLAT]]
+; SPLIT_REMAINDER-NEXT: [[BLOCK5:%.*]] = shufflevector <3 x float> [[COL_LOAD1]], <3 x float> poison, <2 x i32> <i32 0, i32 1>
+; SPLIT_REMAINDER-NEXT: [[TMP3:%.*]] = extractelement <3 x float> [[COL_LOAD4]], i64 1
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT6:%.*]] = insertelement <2 x float> poison, float [[TMP3]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT7:%.*]] = shufflevector <2 x float> [[SPLAT_SPLATINSERT6]], <2 x float> poison, <2 x i32> zeroinitializer
+; SPLIT_REMAINDER-NEXT: [[TMP4:%.*]] = fmul <2 x float> [[BLOCK5]], [[SPLAT_SPLAT7]]
+; SPLIT_REMAINDER-NEXT: [[TMP5:%.*]] = fadd <2 x float> [[TMP2]], [[TMP4]]
+; SPLIT_REMAINDER-NEXT: [[BLOCK8:%.*]] = shufflevector <3 x float> [[COL_LOAD3]], <3 x float> poison, <2 x i32> <i32 0, i32 1>
+; SPLIT_REMAINDER-NEXT: [[TMP6:%.*]] = extractelement <3 x float> [[COL_LOAD4]], i64 2
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT9:%.*]] = insertelement <2 x float> poison, float [[TMP6]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT10:%.*]] = shufflevector <2 x float> [[SPLAT_SPLATINSERT9]], <2 x float> poison, <2 x i32> zeroinitializer
+; SPLIT_REMAINDER-NEXT: [[TMP7:%.*]] = fmul <2 x float> [[BLOCK8]], [[SPLAT_SPLAT10]]
+; SPLIT_REMAINDER-NEXT: [[TMP8:%.*]] = fadd <2 x float> [[TMP5]], [[TMP7]]
+; SPLIT_REMAINDER-NEXT: [[TMP9:%.*]] = shufflevector <2 x float> [[TMP8]], <2 x float> poison, <3 x i32> <i32 0, i32 1, i32 poison>
+; SPLIT_REMAINDER-NEXT: [[TMP10:%.*]] = shufflevector <3 x float> poison, <3 x float> [[TMP9]], <3 x i32> <i32 3, i32 4, i32 2>
+; SPLIT_REMAINDER-NEXT: [[BLOCK11:%.*]] = shufflevector <3 x float> [[COL_LOAD]], <3 x float> poison, <1 x i32> <i32 2>
+; SPLIT_REMAINDER-NEXT: [[TMP11:%.*]] = extractelement <3 x float> [[COL_LOAD4]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT12:%.*]] = insertelement <1 x float> poison, float [[TMP11]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT13:%.*]] = shufflevector <1 x float> [[SPLAT_SPLATINSERT12]], <1 x float> poison, <1 x i32> zeroinitializer
+; SPLIT_REMAINDER-NEXT: [[TMP12:%.*]] = fmul <1 x float> [[BLOCK11]], [[SPLAT_SPLAT13]]
+; SPLIT_REMAINDER-NEXT: [[BLOCK14:%.*]] = shufflevector <3 x float> [[COL_LOAD1]], <3 x float> poison, <1 x i32> <i32 2>
+; SPLIT_REMAINDER-NEXT: [[TMP13:%.*]] = extractelement <3 x float> [[COL_LOAD4]], i64 1
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT15:%.*]] = insertelement <1 x float> poison, float [[TMP13]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT16:%.*]] = shufflevector <1 x float> [[SPLAT_SPLATINSERT15]], <1 x float> poison, <1 x i32> zeroinitializer
+; SPLIT_REMAINDER-NEXT: [[TMP14:%.*]] = fmul <1 x float> [[BLOCK14]], [[SPLAT_SPLAT16]]
+; SPLIT_REMAINDER-NEXT: [[TMP15:%.*]] = fadd <1 x float> [[TMP12]], [[TMP14]]
+; SPLIT_REMAINDER-NEXT: [[BLOCK17:%.*]] = shufflevector <3 x float> [[COL_LOAD3]], <3 x float> poison, <1 x i32> <i32 2>
+; SPLIT_REMAINDER-NEXT: [[TMP16:%.*]] = extractelement <3 x float> [[COL_LOAD4]], i64 2
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT18:%.*]] = insertelement <1 x float> poison, float [[TMP16]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT19:%.*]] = shufflevector <1 x float> [[SPLAT_SPLATINSERT18]], <1 x float> poison, <1 x i32> zeroinitializer
+; SPLIT_REMAINDER-NEXT: [[TMP17:%.*]] = fmul <1 x float> [[BLOCK17]], [[SPLAT_SPLAT19]]
+; SPLIT_REMAINDER-NEXT: [[TMP18:%.*]] = fadd <1 x float> [[TMP15]], [[TMP17]]
+; SPLIT_REMAINDER-NEXT: [[TMP19:%.*]] = shufflevector <1 x float> [[TMP18]], <1 x float> poison, <3 x i32> <i32 0, i32 poison, i32 poison>
+; SPLIT_REMAINDER-NEXT: [[TMP20:%.*]] = shufflevector <3 x float> [[TMP10]], <3 x float> [[TMP19]], <3 x i32> <i32 0, i32 1, i32 3>
+; SPLIT_REMAINDER-NEXT: store <3 x float> [[TMP20]], ptr [[C]], align 4
+; SPLIT_REMAINDER-NEXT: ret void
+;
+; NO_SPLIT_REMAINDER-LABEL: define void @matmul(
+; NO_SPLIT_REMAINDER-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) {
+; NO_SPLIT_REMAINDER-NEXT: [[COL_LOAD:%.*]] = load <3 x float>, ptr [[A]], align 4
+; NO_SPLIT_REMAINDER-NEXT: [[VEC_GEP:%.*]] = getelementptr float, ptr [[A]], i64 3
+; NO_SPLIT_REMAINDER-NEXT: [[COL_LOAD1:%.*]] = load <3 x float>, ptr [[VEC_GEP]], align 4
+; NO_SPLIT_REMAINDER-NEXT: [[VEC_GEP2:%.*]] = getelementptr float, ptr [[A]], i64 6
+; NO_SPLIT_REMAINDER-NEXT: [[COL_LOAD3:%.*]] = load <3 x float>, ptr [[VEC_GEP2]], align 4
+; NO_SPLIT_REMAINDER-NEXT: [[COL_LOAD4:%.*]] = load <3 x float>, ptr [[B]], align 4
+; NO_SPLIT_REMAINDER-NEXT: [[BLOCK:%.*]] = shufflevector <3 x float> [[COL_LOAD]], <3 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; NO_SPLIT_REMAINDER-NEXT: [[TMP1:%.*]] = extractelement <3 x float> [[COL_LOAD4]], i64 0
+; NO_SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <3 x float> poison, float [[TMP1]], i64 0
+; NO_SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <3 x float> [[SPLAT_SPLATINSERT]], <3 x float> poison, <3 x i32> zeroinitializer
+; NO_SPLIT_REMAINDER-NEXT: [[TMP2:%.*]] = fmul <3 x float> [[BLOCK]], [[SPLAT_SPLAT]]
+; NO_SPLIT_REMAINDER-NEXT: [[BLOCK5:%.*]] = shufflevector <3 x float> [[COL_LOAD1]], <3 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; NO_SPLIT_REMAINDER-NEXT: [[TMP3:%.*]] = extractelement <3 x float> [[COL_LOAD4]], i64 1
+; NO_SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT6:%.*]] = insertelement <3 x float> poison, float [[TMP3]], i64 0
+; NO_SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT7:%.*]] = shufflevector <3 x float> [[SPLAT_SPLATINSERT6]], <3 x float> poison, <3 x i32> zeroinitializer
+; NO_SPLIT_REMAINDER-NEXT: [[TMP4:%.*]] = fmul <3 x float> [[BLOCK5]], [[SPLAT_SPLAT7]]
+; NO_SPLIT_REMAINDER-NEXT: [[TMP5:%.*]] = fadd <3 x float> [[TMP2]], [[TMP4]]
+; NO_SPLIT_REMAINDER-NEXT: [[BLOCK8:%.*]] = shufflevector <3 x float> [[COL_LOAD3]], <3 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; NO_SPLIT_REMAINDER-NEXT: [[TMP6:%.*]] = extractelement <3 x float> [[COL_LOAD4]], i64 2
+; NO_SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT9:%.*]] = insertelement <3 x float> poison, float [[TMP6]], i64 0
+; NO_SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT10:%.*]] = shufflevector <3 x float> [[SPLAT_SPLATINSERT9]], <3 x float> poison, <3 x i32> zeroinitializer
+; NO_SPLIT_REMAINDER-NEXT: [[TMP7:%.*]] = fmul <3 x float> [[BLOCK8]], [[SPLAT_SPLAT10]]
+; NO_SPLIT_REMAINDER-NEXT: [[TMP8:%.*]] = fadd <3 x float> [[TMP5]], [[TMP7]]
+; NO_SPLIT_REMAINDER-NEXT: [[TMP9:%.*]] = shufflevector <3 x float> [[TMP8]], <3 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; NO_SPLIT_REMAINDER-NEXT: [[TMP10:%.*]] = shufflevector <3 x float> poison, <3 x float> [[TMP9]], <3 x i32> <i32 3, i32 4, i32 5>
+; NO_SPLIT_REMAINDER-NEXT: store <3 x float> [[TMP10]], ptr [[C]], align 4
+; NO_SPLIT_REMAINDER-NEXT: ret void
+;
+ %a_load = load <9 x float>, ptr %a, align 4
+ %b_load = load <3 x float>, ptr %b, align 4
+ %matmul = tail call <3 x float> @llvm.matrix.multiply.v9f32.v3f32.v3f32(<9 x float> %a_load, <3 x float> %b_load, i32 3, i32 3, i32 1)
+ store <3 x float> %matmul, ptr %c, align 4
+ ret void
+}
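; Taken together, the RUN lines of these two new tests suggest the following reading of the flag (an
; inference from the checks, not documented in this change): the 3-element result vector is 3 * 32 = 96
; bits, so with -matrix-split-matmul-remainder-over-threshold=64 it is over the threshold and the odd
; column is lowered as a <2 x float> block plus a <1 x float> remainder block (SPLIT_REMAINDER), while
; with a threshold of 96 it is not over the threshold and the whole <3 x float> row is handled as a
; single block (NO_SPLIT_REMAINDER).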
diff --git a/llvm/test/Transforms/SimpleLoopUnswitch/PGO-nontrivial-unswitch.ll b/llvm/test/Transforms/PhaseOrdering/unswitch-cold-func.ll
index 239397b..a6ebdf0 100644
--- a/llvm/test/Transforms/SimpleLoopUnswitch/PGO-nontrivial-unswitch.ll
+++ b/llvm/test/Transforms/PhaseOrdering/unswitch-cold-func.ll
@@ -1,13 +1,14 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt < %s -passes='require<profile-summary>,function(loop-mssa(simple-loop-unswitch<nontrivial>))' -S | FileCheck %s
+; RUN: opt < %s -passes='pgo-force-function-attrs,function(loop-mssa(simple-loop-unswitch<nontrivial>))' -pgo-kind=pgo-instr-use-pipeline -pgo-cold-func-opt=optsize -S | FileCheck %s
+; RUN: opt < %s -passes='pgo-force-function-attrs,function(loop-mssa(simple-loop-unswitch<nontrivial>))' -pgo-kind=pgo-instr-use-pipeline -pgo-cold-func-opt=minsize -S | FileCheck %s
;; Check that non-trivial loop unswitching is not applied to a cold loop in a
;; cold loop nest.
;; IR was generated from the following loop nest, profiled when called
;; with M=0 and N=0.
-;; void hotFunction(bool cond, int M, int N, int * A, int *B, int *C) {
+;; void function(bool cond, int M, int N, int * A, int *B, int *C) {
;; for (unsigned j = 0; j < M; j++)
;; for (unsigned i=0; i < N; i++) {
;; A[i] = B[i] + C[i];
@@ -15,8 +16,8 @@
;; }
;; }
-define void @_Z11hotFunctionbiiPiS_S_(i1 %cond, i32 %M, i32 %N, ptr %A, ptr %B, ptr %C) !prof !36 {
-; CHECK-LABEL: define void @_Z11hotFunctionbiiPiS_S_
+define void @_Z11functionbiiPiS_S_(i1 %cond, i32 %M, i32 %N, ptr %A, ptr %B, ptr %C) !prof !36 {
+; CHECK-LABEL: define void @_Z11functionbiiPiS_S_
; CHECK-SAME: (i1 [[COND:%.*]], i32 [[M:%.*]], i32 [[N:%.*]], ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) {{.*}}{
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP19_NOT:%.*]] = icmp eq i32 [[M]], 0
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/last-non-copyable-inst-used-outside-bb.ll b/llvm/test/Transforms/SLPVectorizer/X86/last-non-copyable-inst-used-outside-bb.ll
new file mode 100644
index 0000000..2f97b41
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/last-non-copyable-inst-used-outside-bb.ll
@@ -0,0 +1,89 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -passes=slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu -slp-threshold=-99999 < %s | FileCheck %s
+
+define void @test() {
+; CHECK-LABEL: define void @test() {
+; CHECK-NEXT: [[BB:.*]]:
+; CHECK-NEXT: br label %[[BB1:.*]]
+; CHECK: [[BB1]]:
+; CHECK-NEXT: [[TMP0:%.*]] = phi <4 x i32> [ zeroinitializer, %[[BB]] ], [ [[TMP7:%.*]], %[[BB16:.*]] ], [ zeroinitializer, %[[BB1]] ]
+; CHECK-NEXT: br i1 false, label %[[BB1]], label %[[BB5:.*]]
+; CHECK: [[BB5]]:
+; CHECK-NEXT: [[PHI8:%.*]] = phi double [ 0.000000e+00, %[[BB16]] ], [ 0.000000e+00, %[[BB1]] ]
+; CHECK-NEXT: [[TMP1:%.*]] = phi <4 x i32> [ [[TMP8:%.*]], %[[BB16]] ], [ <i32 poison, i32 poison, i32 0, i32 0>, %[[BB1]] ]
+; CHECK-NEXT: switch i32 0, label %[[BB21:.*]] [
+; CHECK-NEXT: i32 4, label %[[BB21]]
+; CHECK-NEXT: i32 1, label %[[BB21]]
+; CHECK-NEXT: i32 0, label %[[BB9:.*]]
+; CHECK-NEXT: ]
+; CHECK: [[BB9]]:
+; CHECK-NEXT: [[PHI13:%.*]] = phi double [ 0.000000e+00, %[[BB21]] ], [ 0.000000e+00, %[[BB5]] ]
+; CHECK-NEXT: [[TMP2:%.*]] = phi <4 x i32> [ [[TMP1]], %[[BB21]] ], [ <i32 poison, i32 poison, i32 0, i32 0>, %[[BB5]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = phi <4 x i32> [ [[TMP9:%.*]], %[[BB21]] ], [ <i32 poison, i32 poison, i32 0, i32 0>, %[[BB5]] ]
+; CHECK-NEXT: switch i32 0, label %[[BB15:.*]] [
+; CHECK-NEXT: i32 1, label %[[BB14:.*]]
+; CHECK-NEXT: i32 0, label %[[BB16]]
+; CHECK-NEXT: ]
+; CHECK: [[BB14]]:
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> <i32 poison, i32 poison, i32 0, i32 poison>, <4 x i32> <i32 0, i32 1, i32 6, i32 3>
+; CHECK-NEXT: br label %[[BB16]]
+; CHECK: [[BB15]]:
+; CHECK-NEXT: [[TMP5:%.*]] = add <4 x i32> <i32 poison, i32 poison, i32 0, i32 0>, [[TMP2]]
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <4 x i32> [[TMP3]], <4 x i32> [[TMP5]], <4 x i32> <i32 poison, i32 poison, i32 2, i32 7>
+; CHECK-NEXT: br label %[[BB16]]
+; CHECK: [[BB16]]:
+; CHECK-NEXT: [[PHI20:%.*]] = phi double [ 0.000000e+00, %[[BB15]] ], [ 0.000000e+00, %[[BB14]] ], [ 0.000000e+00, %[[BB9]] ]
+; CHECK-NEXT: [[TMP7]] = phi <4 x i32> [ [[TMP5]], %[[BB15]] ], [ [[TMP4]], %[[BB14]] ], [ <i32 poison, i32 poison, i32 0, i32 0>, %[[BB9]] ]
+; CHECK-NEXT: [[TMP8]] = phi <4 x i32> [ [[TMP6]], %[[BB15]] ], [ [[TMP3]], %[[BB14]] ], [ <i32 poison, i32 poison, i32 0, i32 0>, %[[BB9]] ]
+; CHECK-NEXT: br i1 false, label %[[BB5]], label %[[BB1]]
+; CHECK: [[BB21]]:
+; CHECK-NEXT: [[TMP9]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> <i32 poison, i32 poison, i32 0, i32 poison>, <4 x i32> <i32 0, i32 1, i32 6, i32 3>
+; CHECK-NEXT: br label %[[BB9]]
+;
+bb:
+ br label %bb1
+
+bb1:
+ %phi = phi i32 [ 0, %bb ], [ 0, %bb1 ], [ %phi17, %bb16 ]
+ %phi2 = phi i32 [ 0, %bb ], [ 0, %bb1 ], [ %phi18, %bb16 ]
+ %phi3 = phi i32 [ 0, %bb ], [ poison, %bb16 ], [ 0, %bb1 ]
+ %phi4 = phi i32 [ 0, %bb ], [ poison, %bb16 ], [ 0, %bb1 ]
+ br i1 false, label %bb1, label %bb5
+
+bb5:
+ %phi6 = phi i32 [ %phi17, %bb16 ], [ 0, %bb1 ]
+ %phi7 = phi i32 [ %phi19, %bb16 ], [ 0, %bb1 ]
+ %phi8 = phi double [ 0.000000e+00, %bb16 ], [ 0.000000e+00, %bb1 ]
+ switch i32 0, label %bb21 [
+ i32 4, label %bb21
+ i32 1, label %bb21
+ i32 0, label %bb9
+ ]
+
+bb9:
+ %phi10 = phi i32 [ %phi6, %bb21 ], [ 0, %bb5 ]
+ %phi11 = phi i32 [ %phi7, %bb21 ], [ 0, %bb5 ]
+ %phi12 = phi i32 [ 0, %bb21 ], [ 0, %bb5 ]
+ %phi13 = phi double [ 0.000000e+00, %bb21 ], [ 0.000000e+00, %bb5 ]
+ switch i32 0, label %bb15 [
+ i32 1, label %bb14
+ i32 0, label %bb16
+ ]
+
+bb14:
+ br label %bb16
+
+bb15:
+ %add = add i32 0, %phi10
+ br label %bb16
+
+bb16:
+ %phi17 = phi i32 [ %add, %bb15 ], [ %phi10, %bb14 ], [ 0, %bb9 ]
+ %phi18 = phi i32 [ %phi11, %bb15 ], [ 0, %bb14 ], [ 0, %bb9 ]
+ %phi19 = phi i32 [ %phi12, %bb15 ], [ %phi12, %bb14 ], [ 0, %bb9 ]
+ %phi20 = phi double [ 0.000000e+00, %bb15 ], [ 0.000000e+00, %bb14 ], [ 0.000000e+00, %bb9 ]
+ br i1 false, label %bb5, label %bb1
+
+bb21:
+ br label %bb9
+}
diff --git a/llvm/test/Transforms/SimpleLoopUnswitch/nontrivial-unswitch-markloopasdeleted.ll b/llvm/test/Transforms/SimpleLoopUnswitch/nontrivial-unswitch-markloopasdeleted.ll
index 9ab713c..383407b 100644
--- a/llvm/test/Transforms/SimpleLoopUnswitch/nontrivial-unswitch-markloopasdeleted.ll
+++ b/llvm/test/Transforms/SimpleLoopUnswitch/nontrivial-unswitch-markloopasdeleted.ll
@@ -18,7 +18,6 @@
; the analysis caches.
;
; CHECK: Running pass: SimpleLoopUnswitchPass on loop %loop_begin in function test6
-; CHECK-NEXT: Running analysis: OuterAnalysisManagerProxy
; CHECK-NEXT: Clearing all analysis results for: loop_a_inner
diff --git a/llvm/test/Verifier/matrix-intrinsics.ll b/llvm/test/Verifier/matrix-intrinsics.ll
index b6d5ad9..43d1a79 100644
--- a/llvm/test/Verifier/matrix-intrinsics.ll
+++ b/llvm/test/Verifier/matrix-intrinsics.ll
@@ -1,8 +1,7 @@
-; RUN: not llvm-as < %s -o /dev/null 2>&1 | FileCheck %s
+; RUN: not opt -S %s 2>&1 | FileCheck %s
define <4 x float> @transpose(<4 x float> %m, i32 %arg) {
-; CHECK: assembly parsed, but does not verify as correct!
-; CHECK-NEXT: Result of a matrix operation does not fit in the returned vector!
+; CHECK: Result of a matrix operation does not fit in the returned vector!
; CHECK-NEXT: Result of a matrix operation does not fit in the returned vector!
; CHECK-NEXT: Result of a matrix operation does not fit in the returned vector!
; CHECK-NEXT: immarg operand has non-immediate parameter
@@ -118,16 +117,34 @@ define void @column.major_store_stride_too_small(ptr %m, i64 %arg) {
ret void
}
+define <4 x float> @column.major_load_stride_i128(ptr %m, i32 %arg) {
+; CHECK-NEXT: Stride bitwidth cannot exceed 64!
+; CHECK-NEXT: ptr @llvm.matrix.column.major.load.v4f32.i128
+ %result.1 = call <4 x float> @llvm.matrix.column.major.load.v4f32.i128(ptr %m, i128 u0x10000000000000000, i1 false, i32 2, i32 2)
+ ret <4 x float> %result.1
+}
+
+define void @column.major_store_stride_i128(ptr %m, i64 %arg) {
+; CHECK-NEXT: Stride bitwidth cannot exceed 64!
+; CHECK-NEXT: ptr @llvm.matrix.column.major.store.v4f32.i128
+ call void @llvm.matrix.column.major.store.v4f32.i128(<4 x float> zeroinitializer, ptr %m, i128 u0x10000000000000000, i1 false, i32 2, i32 2)
+ ret void
+}
+
declare <4 x i32> @llvm.matrix.column.major.load.v4i32.i64(ptr, i64, i1, i32, i32)
declare <4 x float> @llvm.matrix.column.major.load.v4f32.p0(ptr, i64, i1, i32, i32)
declare <4 x float> @llvm.matrix.column.major.load.v4f32.i64(ptr, i64, i1, i32, i32)
declare <6 x float> @llvm.matrix.column.major.load.v6f32.i64(ptr, i64, i1, i32, i32)
+declare <6 x float> @llvm.matrix.column.major.load.v6f32.i8(ptr, i8, i1, i32, i32)
+declare <6 x float> @llvm.matrix.column.major.load.v6f32.i128(ptr, i128, i1, i32, i32)
declare void @llvm.matrix.column.major.store.v4f32.i64(<4 x float>, ptr, i64, i1, i32, i32)
declare void @llvm.matrix.column.major.store.v6f32.i64(<6 x float>, ptr, i64, i1, i32, i32)
declare void @llvm.matrix.column.major.store.v4i32.vi32(<4 x i32>, ptr, i64, i1, i32, i32)
declare void @llvm.matrix.column.major.store.v4f32.p0(<4 x float>, ptr, i64, i1, i32, i32)
declare void @llvm.matrix.column.major.store.v4p0.i64(<4 x ptr>, ptr, i64, i1, i32, i32)
+declare void @llvm.matrix.column.major.store.v4p0.i8(<4 x ptr>, ptr, i8, i1, i32, i32)
+declare void @llvm.matrix.column.major.store.v4p0.i128(<4 x ptr>, ptr, i128, i1, i32, i32)
declare <4 x i32> @llvm.matrix.transpose.v4i32.v4f32(<4 x float>, i32, i32)
declare <4 x float> @llvm.matrix.transpose.v4f32(<4 x float>, i32, i32)
diff --git a/llvm/test/tools/llvm-reduce/reduce-instructions-alloca.ll b/llvm/test/tools/llvm-reduce/reduce-instructions-alloca.ll
new file mode 100644
index 0000000..94b45d2
--- /dev/null
+++ b/llvm/test/tools/llvm-reduce/reduce-instructions-alloca.ll
@@ -0,0 +1,16 @@
+; RUN: llvm-reduce --abort-on-invalid-reduction --delta-passes=instructions --test FileCheck --test-arg --check-prefixes=CHECK,INTERESTING --test-arg %s --test-arg --input-file %s -o %t
+; RUN: FileCheck -check-prefixes=CHECK,RESULT %s < %t
+
+; CHECK-LABEL: define void @alloca(
+; INTERESTING: call void @llvm.lifetime.start.p0(
+; INTERESTING: call void @llvm.lifetime.end.p0(
+
+; RESULT: call void @llvm.lifetime.start.p0(ptr poison)
+; RESULT-NEXT: call void @llvm.lifetime.end.p0(ptr poison)
+; RESULT-NEXT: ret void
+define void @alloca(ptr %ptr) {
+ %alloca = alloca i32, align 4
+ call void @llvm.lifetime.start.p0(ptr %alloca)
+ call void @llvm.lifetime.end.p0(ptr %alloca)
+ ret void
+}
diff --git a/llvm/tools/llvm-c-test/debuginfo.c b/llvm/tools/llvm-c-test/debuginfo.c
index a2f4b3e..9db7aa0 100644
--- a/llvm/tools/llvm-c-test/debuginfo.c
+++ b/llvm/tools/llvm-c-test/debuginfo.c
@@ -43,6 +43,9 @@ int llvm_test_dibuilder(void) {
LLVMMetadataRef File = LLVMDIBuilderCreateFile(DIB, Filename,
strlen(Filename), ".", 1);
+ LLVMMetadataRef FileCS = LLVMDIBuilderCreateFileWithChecksum(
+ DIB, Filename, strlen(Filename), ".", 1, CSK_MD5, "1234", 4, "source", 6);
+
LLVMMetadataRef CompileUnit = LLVMDIBuilderCreateCompileUnit(
DIB, LLVMDWARFSourceLanguageC, File, "llvm-c-test", 11, 0, NULL, 0, 0,
NULL, 0, LLVMDWARFEmissionFull, 0, 0, 0, "/", 1, "", 0);
@@ -61,7 +64,7 @@ int llvm_test_dibuilder(void) {
"/test/include/llvm-c-test-import.h", 34,
"", 0);
LLVMMetadataRef ImportedModule = LLVMDIBuilderCreateImportedModuleFromModule(
- DIB, Module, OtherModule, File, 42, NULL, 0);
+ DIB, Module, OtherModule, FileCS, 42, NULL, 0);
LLVMDIBuilderCreateImportedModuleFromAlias(DIB, Module, ImportedModule, File,
42, NULL, 0);
diff --git a/llvm/tools/llvm-reduce/deltas/ReduceInstructions.cpp b/llvm/tools/llvm-reduce/deltas/ReduceInstructions.cpp
index f1f5d6b..19b69e8 100644
--- a/llvm/tools/llvm-reduce/deltas/ReduceInstructions.cpp
+++ b/llvm/tools/llvm-reduce/deltas/ReduceInstructions.cpp
@@ -13,6 +13,8 @@
#include "ReduceInstructions.h"
#include "Utils.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Instructions.h"
using namespace llvm;
@@ -37,7 +39,9 @@ void llvm::reduceInstructionsDeltaPass(Oracle &O, ReducerWorkItem &WorkItem) {
for (auto &Inst :
make_early_inc_range(make_range(BB.begin(), std::prev(BB.end())))) {
if (!shouldAlwaysKeep(Inst) && !O.shouldKeep()) {
- Inst.replaceAllUsesWith(getDefaultValue(Inst.getType()));
+ Inst.replaceAllUsesWith(isa<AllocaInst>(Inst)
+ ? PoisonValue::get(Inst.getType())
+ : getDefaultValue(Inst.getType()));
Inst.eraseFromParent();
}
}
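A plausible reason for substituting poison specifically (inferred from the paired
reduce-instructions-alloca.ll test above rather than stated in this change): the lifetime intrinsics
require their operand to be an alloca or poison, so once the alloca itself is deleted the reduction has
to leave IR of the form
  call void @llvm.lifetime.start.p0(ptr poison)
rather than the pass's usual default value, which would no longer survive --abort-on-invalid-reduction.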
diff --git a/llvm/unittests/ADT/BitTest.cpp b/llvm/unittests/ADT/BitTest.cpp
index eaed4e1..5b3df91 100644
--- a/llvm/unittests/ADT/BitTest.cpp
+++ b/llvm/unittests/ADT/BitTest.cpp
@@ -270,6 +270,22 @@ TEST(BitTest, BitWidthConstexpr) {
llvm::bit_width_constexpr(std::numeric_limits<uint64_t>::max()) == 64);
}
+TEST(BitTest, BitCeilConstexpr) {
+ static_assert(llvm::bit_ceil_constexpr(0u) == 1);
+ static_assert(llvm::bit_ceil_constexpr(1u) == 1);
+ static_assert(llvm::bit_ceil_constexpr(2u) == 2);
+ static_assert(llvm::bit_ceil_constexpr(3u) == 4);
+ static_assert(llvm::bit_ceil_constexpr(4u) == 4);
+ static_assert(llvm::bit_ceil_constexpr(5u) == 8);
+ static_assert(llvm::bit_ceil_constexpr(6u) == 8);
+ static_assert(llvm::bit_ceil_constexpr(7u) == 8);
+ static_assert(llvm::bit_ceil_constexpr(8u) == 8);
+
+ static_assert(llvm::bit_ceil_constexpr(255u) == 256);
+ static_assert(llvm::bit_ceil_constexpr(256u) == 256);
+ static_assert(llvm::bit_ceil_constexpr(257u) == 512);
+}
+
TEST(BitTest, CountlZero) {
uint8_t Z8 = 0;
uint16_t Z16 = 0;
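As a quick sanity check on the new assertions: bit_ceil_constexpr(x) returns the smallest power of two
greater than or equal to x, with 0 mapped to 1, so for example bit_ceil_constexpr(257) == 512 because
256 < 257 <= 512 = 2^9.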
diff --git a/llvm/unittests/Bitcode/DataLayoutUpgradeTest.cpp b/llvm/unittests/Bitcode/DataLayoutUpgradeTest.cpp
index 3ab2caf..57e15a4 100644
--- a/llvm/unittests/Bitcode/DataLayoutUpgradeTest.cpp
+++ b/llvm/unittests/Bitcode/DataLayoutUpgradeTest.cpp
@@ -39,20 +39,21 @@ TEST(DataLayoutUpgradeTest, ValidDataLayoutUpgrade) {
"64-i128:128-n32:64-S128-Fn32");
// Check that AMDGPU targets add -G1 if it's not present.
- EXPECT_EQ(UpgradeDataLayoutString("e-p:32:32", "r600"), "e-p:32:32-G1");
+ EXPECT_EQ(UpgradeDataLayoutString("e-p:32:32", "r600"), "m:e-e-p:32:32-G1");
// and that AMDGCN adds p7 and p8 as well.
EXPECT_EQ(UpgradeDataLayoutString("e-p:64:64", "amdgcn"),
- "e-p:64:64-G1-ni:7:8:9-p7:160:256:256:32-p8:128:128:128:48-p9:192:"
- "256:256:32");
+ "m:e-e-p:64:64-G1-ni:7:8:9-p7:160:256:256:32-p8:128:128:128:48-p9:"
+ "192:256:256:32");
EXPECT_EQ(UpgradeDataLayoutString("e-p:64:64-G1", "amdgcn"),
- "e-p:64:64-G1-ni:7:8:9-p7:160:256:256:32-p8:128:128:128:48-p9:192:"
- "256:256:32");
+ "m:e-e-p:64:64-G1-ni:7:8:9-p7:160:256:256:32-p8:128:128:128:48-p9:"
+ "192:256:256:32");
// Check that the old AMDGCN p8:128:128 definition is upgraded
EXPECT_EQ(UpgradeDataLayoutString("e-p:64:64-p8:128:128-G1", "amdgcn"),
- "e-p:64:64-p8:128:128:128:48-G1-ni:7:8:9-p7:160:256:256:32-"
- "p9:192:256:256:32");
+ "m:e-e-p:64:64-p8:128:128:128:48-G1-ni:7:8:9-p7:160:256:256:32-p9:"
+ "192:256:256:32");
// but that r600 does not.
- EXPECT_EQ(UpgradeDataLayoutString("e-p:32:32-G1", "r600"), "e-p:32:32-G1");
+ EXPECT_EQ(UpgradeDataLayoutString("e-p:32:32-G1", "r600"),
+ "m:e-e-p:32:32-G1");
// Ensure that the non-integral direction for address space 8 doesn't get
// added in to pointer declarations.
@@ -62,11 +63,10 @@ TEST(DataLayoutUpgradeTest, ValidDataLayoutUpgrade) {
"64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-"
"v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7",
"amdgcn"),
- "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-"
- "v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:"
+ "m:e-e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:"
+ "64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:"
"1024-v2048:2048-n32:64-S32-A5-G1-ni:7:8:9-p7:160:256:256:32-p8:128:128:"
- "128:48-"
- "p9:192:256:256:32");
+ "128:48-p9:192:256:256:32");
// Check that RISCV64 upgrades -n64 to -n32:64.
EXPECT_EQ(UpgradeDataLayoutString("e-m:e-p:64:64-i64:64-i128:128-n64-S128",
@@ -147,28 +147,29 @@ TEST(DataLayoutUpgradeTest, NoDataLayoutUpgrade) {
"64-S128-Fn32");
// Check that AMDGPU targets don't add -G1 if there is already a -G flag.
- EXPECT_EQ(UpgradeDataLayoutString("e-p:32:32-G2", "r600"), "e-p:32:32-G2");
- EXPECT_EQ(UpgradeDataLayoutString("G2", "r600"), "G2");
+ EXPECT_EQ(UpgradeDataLayoutString("e-p:32:32-G2", "r600"),
+ "m:e-e-p:32:32-G2");
+ EXPECT_EQ(UpgradeDataLayoutString("G2", "r600"), "m:e-G2");
EXPECT_EQ(UpgradeDataLayoutString("e-p:64:64-G2", "amdgcn"),
- "e-p:64:64-G2-ni:7:8:9-p7:160:256:256:32-p8:128:128:128:48-p9:192:"
- "256:256:32");
+ "m:e-e-p:64:64-G2-ni:7:8:9-p7:160:256:256:32-p8:128:128:128:48-p9:"
+ "192:256:256:32");
EXPECT_EQ(UpgradeDataLayoutString("G2-e-p:64:64", "amdgcn"),
- "G2-e-p:64:64-ni:7:8:9-p7:160:256:256:32-p8:128:128:128:48-p9:192:"
- "256:256:32");
+ "m:e-G2-e-p:64:64-ni:7:8:9-p7:160:256:256:32-p8:128:128:128:48-p9:"
+ "192:256:256:32");
EXPECT_EQ(UpgradeDataLayoutString("e-p:64:64-G0", "amdgcn"),
- "e-p:64:64-G0-ni:7:8:9-p7:160:256:256:32-p8:128:128:128:48-p9:192:"
- "256:256:32");
+ "m:e-e-p:64:64-G0-ni:7:8:9-p7:160:256:256:32-p8:128:128:128:48-p9:"
+ "192:256:256:32");
// Check that AMDGCN targets don't add already declared address space 7.
EXPECT_EQ(
UpgradeDataLayoutString("e-p:64:64-p7:64:64", "amdgcn"),
- "e-p:64:64-p7:64:64-G1-ni:7:8:9-p8:128:128:128:48-p9:192:256:256:32");
+ "m:e-e-p:64:64-p7:64:64-G1-ni:7:8:9-p8:128:128:128:48-p9:192:256:256:32");
EXPECT_EQ(
UpgradeDataLayoutString("p7:64:64-G2-e-p:64:64", "amdgcn"),
- "p7:64:64-G2-e-p:64:64-ni:7:8:9-p8:128:128:128:48-p9:192:256:256:32");
+ "m:e-p7:64:64-G2-e-p:64:64-ni:7:8:9-p8:128:128:128:48-p9:192:256:256:32");
EXPECT_EQ(
UpgradeDataLayoutString("e-p:64:64-p7:64:64-G1", "amdgcn"),
- "e-p:64:64-p7:64:64-G1-ni:7:8:9-p8:128:128:128:48-p9:192:256:256:32");
+ "m:e-e-p:64:64-p7:64:64-G1-ni:7:8:9-p8:128:128:128:48-p9:192:256:256:32");
// Check that SPIR & SPIRV targets don't add -G1 if there is already a -G
// flag.
@@ -198,10 +199,10 @@ TEST(DataLayoutUpgradeTest, EmptyDataLayout) {
EXPECT_EQ(DL2, "e-m:e-p:32:32-i64:64-f80:128-n8:16:32:64-S128");
// Check that AMDGPU targets add G1 if it's not present.
- EXPECT_EQ(UpgradeDataLayoutString("", "r600"), "G1");
+ EXPECT_EQ(UpgradeDataLayoutString("", "r600"), "m:e-G1");
EXPECT_EQ(
UpgradeDataLayoutString("", "amdgcn"),
- "G1-ni:7:8:9-p7:160:256:256:32-p8:128:128:128:48-p9:192:256:256:32");
+ "m:e-G1-ni:7:8:9-p7:160:256:256:32-p8:128:128:128:48-p9:192:256:256:32");
// Check that SPIR & SPIRV targets add G1 if it's not present.
EXPECT_EQ(UpgradeDataLayoutString("", "spir"), "G1");
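The common thread in the updated AMDGPU/r600 expectations is a new leading `m:e-` component; in data
layout syntax `m:e` selects ELF-style symbol mangling, so the upgraded strings now take a form such as
  target datalayout = "m:e-e-p:32:32-G1"
(the concrete string is the r600 case above; that the mangling component is now added by the upgrade for
these targets is read off the expectations, not stated elsewhere in this change).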
diff --git a/llvm/unittests/ExecutionEngine/Orc/ReOptimizeLayerTest.cpp b/llvm/unittests/ExecutionEngine/Orc/ReOptimizeLayerTest.cpp
index f35a378..686d85d 100644
--- a/llvm/unittests/ExecutionEngine/Orc/ReOptimizeLayerTest.cpp
+++ b/llvm/unittests/ExecutionEngine/Orc/ReOptimizeLayerTest.cpp
@@ -43,7 +43,7 @@ protected:
// COFF-ARM64 is not supported yet
auto Triple = JTMB->getTargetTriple();
- if (Triple.isOSBinFormatCOFF() && Triple.isAArch64())
+ if (Triple.isOSBinFormatCOFF())
GTEST_SKIP();
// SystemZ is not supported yet.
diff --git a/llvm/utils/gn/secondary/lld/test/BUILD.gn b/llvm/utils/gn/secondary/lld/test/BUILD.gn
index dabc578..585e0a4 100644
--- a/llvm/utils/gn/secondary/lld/test/BUILD.gn
+++ b/llvm/utils/gn/secondary/lld/test/BUILD.gn
@@ -1,5 +1,6 @@
import("//llvm/lib/DebugInfo/PDB/enable_dia.gni")
import("//llvm/triples.gni")
+import("//llvm/utils/gn/build/libs/pthread/enable.gni")
import("//llvm/utils/gn/build/libs/xml/enable.gni")
import("//llvm/utils/gn/build/libs/zlib/enable.gni")
import("//llvm/utils/gn/build/libs/zstd/enable.gni")
@@ -88,6 +89,12 @@ write_lit_cfg("lit_site_cfg") {
extra_values += [ "LLVM_ENABLE_LIBXML2=0" ] # Must be 0.
}
+ if (llvm_enable_threads) {
+ extra_values += [ "LLVM_ENABLE_THREADS=1" ]
+ } else {
+ extra_values += [ "LLVM_ENABLE_THREADS=0" ] # Must be 0.
+ }
+
if (llvm_enable_zlib) {
extra_values += [ "LLVM_ENABLE_ZLIB=1" ]
} else {
diff --git a/llvm/utils/profcheck-xfail.txt b/llvm/utils/profcheck-xfail.txt
index 3f8be5e..b570f8d 100644
--- a/llvm/utils/profcheck-xfail.txt
+++ b/llvm/utils/profcheck-xfail.txt
@@ -107,6 +107,7 @@ Instrumentation/AddressSanitizer/asan-stack-safety.ll
Instrumentation/AddressSanitizer/asan-struct-scalable.ll
Instrumentation/AddressSanitizer/asan-vp-load-store.ll
Instrumentation/AddressSanitizer/asan-vs-gvn.ll
+Instrumentation/AddressSanitizer/asan-win-dont-instrument-catchpad.ll
Instrumentation/AddressSanitizer/basic.ll
Instrumentation/AddressSanitizer/basic-msvc64.ll
Instrumentation/AddressSanitizer/byref-args.ll
diff --git a/llvm/utils/release/build_llvm_release.bat b/llvm/utils/release/build_llvm_release.bat
index 54645d0..001339f 100755..100644
--- a/llvm/utils/release/build_llvm_release.bat
+++ b/llvm/utils/release/build_llvm_release.bat
@@ -156,16 +156,14 @@ set common_cmake_flags=^
-DLLVM_BUILD_LLVM_C_DYLIB=ON ^
-DPython3_FIND_REGISTRY=NEVER ^
-DPACKAGE_VERSION=%package_version% ^
- -DLLDB_RELOCATABLE_PYTHON=1 ^
- -DLLDB_EMBED_PYTHON_HOME=OFF ^
-DCMAKE_CL_SHOWINCLUDES_PREFIX="Note: including file: " ^
-DLLVM_ENABLE_LIBXML2=FORCE_ON ^
- -DLLDB_ENABLE_LIBXML2=OFF ^
-DCLANG_ENABLE_LIBXML2=OFF ^
-DCMAKE_C_FLAGS="%common_compiler_flags%" ^
-DCMAKE_CXX_FLAGS="%common_compiler_flags%" ^
-DLLVM_ENABLE_RPMALLOC=ON ^
- -DLLVM_ENABLE_PROJECTS="clang;clang-tools-extra;lld;compiler-rt;lldb;openmp"
+ -DLLVM_ENABLE_PROJECTS="clang;clang-tools-extra;lld" ^
+ -DLLVM_ENABLE_RUNTIMES="compiler-rt;openmp"
if "%force-msvc%" == "" (
where /q clang-cl
@@ -185,6 +183,11 @@ if "%force-msvc%" == "" (
)
)
+set common_lldb_flags=^
+ -DLLDB_RELOCATABLE_PYTHON=1 ^
+ -DLLDB_EMBED_PYTHON_HOME=OFF ^
+ -DLLDB_ENABLE_LIBXML2=OFF
+
set cmake_profile_flags=""
REM Preserve original path
@@ -192,8 +195,8 @@ set OLDPATH=%PATH%
REM Build the 32-bits and/or 64-bits binaries.
if "%x86%" == "true" call :do_build_32 || exit /b 1
-if "%x64%" == "true" call :do_build_64 || exit /b 1
-if "%arm64%" == "true" call :do_build_arm64 || exit /b 1
+if "%x64%" == "true" call :do_build_64_common amd64 %python64_dir% || exit /b 1
+if "%arm64%" == "true" call :do_build_64_common arm64 %pythonarm64_dir% || exit /b 1
exit /b 0
::==============================================================================
@@ -212,8 +215,6 @@ set "stage0_bin_dir=%build_dir%/build32_stage0/bin"
set cmake_flags=^
%common_cmake_flags% ^
-DLLVM_ENABLE_RPMALLOC=OFF ^
- -DLLDB_TEST_COMPILER=%stage0_bin_dir%/clang.exe ^
- -DPYTHON_HOME=%PYTHONHOME% ^
-DPython3_ROOT_DIR=%PYTHONHOME% ^
-DLIBXML2_INCLUDE_DIR=%libxmldir%/include/libxml2 ^
-DLIBXML2_LIBRARIES=%libxmldir%/lib/libxml2s.lib
@@ -231,6 +232,9 @@ REM CMake expects the paths that specifies the compiler and linker to be
REM with forward slash.
set all_cmake_flags=^
%cmake_flags% ^
+ -DLLVM_ENABLE_PROJECTS="clang;clang-tools-extra;lld;lldb;" ^
+ %common_lldb_flags% ^
+ -DPYTHON_HOME=%PYTHONHOME% ^
-DCMAKE_C_COMPILER=%stage0_bin_dir%/clang-cl.exe ^
-DCMAKE_CXX_COMPILER=%stage0_bin_dir%/clang-cl.exe ^
-DCMAKE_LINKER=%stage0_bin_dir%/lld-link.exe ^
@@ -254,32 +258,42 @@ exit /b 0
::==============================================================================
::==============================================================================
-:: Build 64-bits binaries.
+:: Build 64-bits binaries (common function for both x64 and arm64)
::==============================================================================
-:do_build_64
-call :set_environment %python64_dir% || exit /b 1
-call "%vsdevcmd%" -arch=amd64 || exit /b 1
+:do_build_64_common
+set arch=%1
+set python_dir=%2
+
+call :set_environment %python_dir% || exit /b 1
+call "%vsdevcmd%" -arch=%arch% || exit /b 1
@echo on
-mkdir build64_stage0
-cd build64_stage0
+mkdir build_%arch%_stage0
+cd build_%arch%_stage0
call :do_build_libxml || exit /b 1
REM Stage0 binaries directory; used in stage1.
-set "stage0_bin_dir=%build_dir%/build64_stage0/bin"
+set "stage0_bin_dir=%build_dir%/build_%arch%_stage0/bin"
set cmake_flags=^
%common_cmake_flags% ^
- -DLLDB_TEST_COMPILER=%stage0_bin_dir%/clang.exe ^
- -DPYTHON_HOME=%PYTHONHOME% ^
-DPython3_ROOT_DIR=%PYTHONHOME% ^
-DLIBXML2_INCLUDE_DIR=%libxmldir%/include/libxml2 ^
- -DLIBXML2_LIBRARIES=%libxmldir%/lib/libxml2s.lib
+ -DLIBXML2_LIBRARIES=%libxmldir%/lib/libxml2s.lib ^
+ -DCLANG_DEFAULT_LINKER=lld
+if "%arch%"=="arm64" (
+ set cmake_flags=%cmake_flags% ^
+ -DCOMPILER_RT_BUILD_SANITIZERS=OFF
+)
-cmake -GNinja %cmake_flags% %llvm_src%\llvm || exit /b 1
+cmake -GNinja %cmake_flags% ^
+ -DLLVM_TARGETS_TO_BUILD=Native ^
+ %llvm_src%\llvm || exit /b 1
ninja || ninja || ninja || exit /b 1
ninja check-llvm || ninja check-llvm || ninja check-llvm || exit /b 1
ninja check-clang || ninja check-clang || ninja check-clang || exit /b 1
ninja check-lld || ninja check-lld || ninja check-lld || exit /b 1
-ninja check-sanitizer || ninja check-sanitizer || ninja check-sanitizer || exit /b 1
+if "%arch%"=="amd64" (
+ ninja check-runtimes || ninja check-runtimes || ninja check-runtimes || exit /b 1
+)
ninja check-clang-tools || ninja check-clang-tools || ninja check-clang-tools || exit /b 1
ninja check-clangd || ninja check-clangd || ninja check-clangd || exit /b 1
cd..
@@ -293,24 +307,40 @@ set all_cmake_flags=^
-DCMAKE_LINKER=%stage0_bin_dir%/lld-link.exe ^
-DCMAKE_AR=%stage0_bin_dir%/llvm-lib.exe ^
-DCMAKE_RC=%stage0_bin_dir%/llvm-windres.exe
+if "%arch%"=="arm64" (
+ set all_cmake_flags=%all_cmake_flags% ^
+ -DCPACK_SYSTEM_NAME=woa64
+)
set cmake_flags=%all_cmake_flags:\=/%
-
-mkdir build64
-cd build64
+mkdir build_%arch%
+cd build_%arch%
call :do_generate_profile || exit /b 1
-cmake -GNinja %cmake_flags% %cmake_profile_flags% %llvm_src%\llvm || exit /b 1
+cmake -GNinja %cmake_flags% ^
+ -DLLVM_ENABLE_PROJECTS="clang;clang-tools-extra;lld;lldb;flang;mlir" ^
+ %common_lldb_flags% ^
+ -DPYTHON_HOME=%PYTHONHOME% ^
+ %cmake_profile_flags% %llvm_src%\llvm || exit /b 1
ninja || ninja || ninja || exit /b 1
ninja check-llvm || ninja check-llvm || ninja check-llvm || exit /b 1
ninja check-clang || ninja check-clang || ninja check-clang || exit /b 1
ninja check-lld || ninja check-lld || ninja check-lld || exit /b 1
-ninja check-sanitizer || ninja check-sanitizer || ninja check-sanitizer || exit /b 1
+if "%arch%"=="amd64" (
+ ninja check-runtimes || ninja check-runtimes || ninja check-runtimes || exit /b 1
+)
ninja check-clang-tools || ninja check-clang-tools || ninja check-clang-tools || exit /b 1
ninja check-clangd || ninja check-clangd || ninja check-clangd || exit /b 1
+REM ninja check-flang || ninja check-flang || ninja check-flang || exit /b 1
+REM ninja check-mlir || ninja check-mlir || ninja check-mlir || exit /b 1
+REM ninja check-lldb || ninja check-lldb || ninja check-lldb || exit /b 1
ninja package || exit /b 1
:: generate tarball with install toolchain only off
-set filename=clang+llvm-%version%-x86_64-pc-windows-msvc
+if "%arch%"=="amd64" (
+ set filename=clang+llvm-%version%-x86_64-pc-windows-msvc
+) else (
+ set filename=clang+llvm-%version%-aarch64-pc-windows-msvc
+)
cmake -GNinja %cmake_flags% %cmake_profile_flags% -DLLVM_INSTALL_TOOLCHAIN_ONLY=OFF ^
-DCMAKE_INSTALL_PREFIX=%build_dir%/%filename% ..\llvm-project\llvm || exit /b 1
ninja install || exit /b 1
@@ -320,75 +350,7 @@ cd ..
7z a -ttar -so %filename%.tar %filename% | 7z a -txz -si %filename%.tar.xz
exit /b 0
-::==============================================================================
-
-::==============================================================================
-:: Build arm64 binaries.
-::==============================================================================
-:do_build_arm64
-call :set_environment %pythonarm64_dir% || exit /b 1
-call "%vsdevcmd%" -host_arch=x64 -arch=arm64 || exit /b 1
-@echo on
-mkdir build_arm64_stage0
-cd build_arm64_stage0
-call :do_build_libxml || exit /b 1
-
-REM Stage0 binaries directory; used in stage1.
-set "stage0_bin_dir=%build_dir%/build_arm64_stage0/bin"
-set cmake_flags=^
- %common_cmake_flags% ^
- -DCLANG_DEFAULT_LINKER=lld ^
- -DLIBXML2_INCLUDE_DIR=%libxmldir%/include/libxml2 ^
- -DLIBXML2_LIBRARIES=%libxmldir%/lib/libxml2s.lib ^
- -DPython3_ROOT_DIR=%PYTHONHOME% ^
- -DCOMPILER_RT_BUILD_PROFILE=OFF ^
- -DCOMPILER_RT_BUILD_SANITIZERS=OFF
-
-REM We need to build stage0 compiler-rt with clang-cl (msvc lacks some builtins).
-cmake -GNinja %cmake_flags% ^
- -DCMAKE_C_COMPILER=clang-cl.exe ^
- -DCMAKE_CXX_COMPILER=clang-cl.exe ^
- %llvm_src%\llvm || exit /b 1
-ninja || exit /b 1
-::ninja check-llvm || exit /b 1
-::ninja check-clang || exit /b 1
-::ninja check-lld || exit /b 1
-::ninja check-sanitizer || exit /b 1
-::ninja check-clang-tools || exit /b 1
-::ninja check-clangd || exit /b 1
-cd..
-
-REM CMake expects the paths that specifies the compiler and linker to be
-REM with forward slash.
-REM CPACK_SYSTEM_NAME is set to have a correct name for installer generated.
-set all_cmake_flags=^
- %cmake_flags% ^
- -DCMAKE_C_COMPILER=%stage0_bin_dir%/clang-cl.exe ^
- -DCMAKE_CXX_COMPILER=%stage0_bin_dir%/clang-cl.exe ^
- -DCMAKE_LINKER=%stage0_bin_dir%/lld-link.exe ^
- -DCMAKE_AR=%stage0_bin_dir%/llvm-lib.exe ^
- -DCMAKE_RC=%stage0_bin_dir%/llvm-windres.exe ^
- -DCPACK_SYSTEM_NAME=woa64
-set cmake_flags=%all_cmake_flags:\=/%
-mkdir build_arm64
-cd build_arm64
-cmake -GNinja %cmake_flags% %llvm_src%\llvm || exit /b 1
-ninja || exit /b 1
-REM Check but do not fail on errors.
-ninja check-lldb
-::ninja check-llvm || exit /b 1
-::ninja check-clang || exit /b 1
-::ninja check-lld || exit /b 1
-::ninja check-sanitizer || exit /b 1
-::ninja check-clang-tools || exit /b 1
-::ninja check-clangd || exit /b 1
-ninja package || exit /b 1
-cd ..
-
-exit /b 0
-::==============================================================================
-::
::==============================================================================
:: Set PATH and some environment variables.
::==============================================================================
diff --git a/mlir/Maintainers.md b/mlir/Maintainers.md
index 5d3b576c..b495d25 100644
--- a/mlir/Maintainers.md
+++ b/mlir/Maintainers.md
@@ -97,7 +97,7 @@ available, should be contacted first, as they're more active in those areas.
* ‘rocdl’ Dialect ([@krzysz00](https://github.com/krzysz00))
* ‘nvgpu’ Dialect ([@grypp](https://github.com/grypp))
* ‘nvvm’ Dialect ([@grypp](https://github.com/grypp))
-* ‘xegpu’ Dialect ([@chencha3](https://github.com/chencha3), [@Jianhui-Li](https://github.com/Jianhui-Li))
+* ‘xegpu’ Dialect ([@charithaintc](https://github.com/charithaintc), [@Jianhui-Li](https://github.com/Jianhui-Li))
* 'xevm' Dialect ([@silee2](https://github.com/silee2))
#### CPU Dialects
diff --git a/mlir/docs/Canonicalization.md b/mlir/docs/Canonicalization.md
index 686e500..2622c08 100644
--- a/mlir/docs/Canonicalization.md
+++ b/mlir/docs/Canonicalization.md
@@ -55,7 +55,7 @@ Some important things to think about w.r.t. canonicalization patterns:
* It is always good to eliminate operations entirely when possible, e.g. by
folding known identities (like "x + 0 = x").
-* Pattens with expensive running time (i.e. have O(n) complexity) or
+* Patterns with expensive running time (i.e. have O(n) complexity) or
complicated cost models don't belong to canonicalization: since the
algorithm is executed iteratively until fixed-point we want patterns that
execute quickly (in particular their matching phase).
diff --git a/mlir/docs/Dialects/Shard.md b/mlir/docs/Dialects/Shard.md
index eb6ff61..573e888 100644
--- a/mlir/docs/Dialects/Shard.md
+++ b/mlir/docs/Dialects/Shard.md
@@ -27,9 +27,9 @@ the tensor is sharded - not specified manually.
### Device Groups
-Each collective operation runs within a group of devices. You define groups
-using the `grid` and `grid_axes` attributes, which describe how to slice the
-full device grid into smaller groups.
+Collective operations run within groups of devices, which are defined
+using the `grid` and `grid_axes` attributes. These describe
+how the full device grid is sliced into smaller groups.
Devices that have the same coordinates *outside* the listed `grid_axes` belong
to the same group.
diff --git a/mlir/include/mlir-c/Rewrite.h b/mlir/include/mlir-c/Rewrite.h
index 2db1d84..fe42a20 100644
--- a/mlir/include/mlir-c/Rewrite.h
+++ b/mlir/include/mlir-c/Rewrite.h
@@ -352,7 +352,7 @@ typedef struct {
/// Create a rewrite pattern that matches the operation
/// with the given rootName, corresponding to mlir::OpRewritePattern.
-MLIR_CAPI_EXPORTED MlirRewritePattern mlirOpRewritePattenCreate(
+MLIR_CAPI_EXPORTED MlirRewritePattern mlirOpRewritePatternCreate(
MlirStringRef rootName, unsigned benefit, MlirContext context,
MlirRewritePatternCallbacks callbacks, void *userData,
size_t nGeneratedNames, MlirStringRef *generatedNames);
diff --git a/mlir/include/mlir/Conversion/MathToROCDL/MathToROCDL.h b/mlir/include/mlir/Conversion/MathToROCDL/MathToROCDL.h
index 46573e79..60f1888 100644
--- a/mlir/include/mlir/Conversion/MathToROCDL/MathToROCDL.h
+++ b/mlir/include/mlir/Conversion/MathToROCDL/MathToROCDL.h
@@ -9,6 +9,7 @@
#define MLIR_CONVERSION_MATHTOROCDL_MATHTOROCDL_H_
#include "mlir/Conversion/LLVMCommon/TypeConverter.h"
+#include "mlir/Dialect/AMDGPU/Utils/Chipset.h"
#include "mlir/IR/PatternMatch.h"
#include <memory>
@@ -19,8 +20,11 @@ class Pass;
#include "mlir/Conversion/Passes.h.inc"
/// Populate the given list with patterns that convert from Math to ROCDL calls.
-void populateMathToROCDLConversionPatterns(const LLVMTypeConverter &converter,
- RewritePatternSet &patterns);
+/// `chipset` specifies the AMDGPU chipset to target. If `std::nullopt`,
+/// none of the chipset-dependent patterns are added.
+void populateMathToROCDLConversionPatterns(
+ const LLVMTypeConverter &converter, RewritePatternSet &patterns,
+ std::optional<amdgpu::Chipset> chipset);
} // namespace mlir
#endif // MLIR_CONVERSION_MATHTOROCDL_MATHTOROCDL_H_
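A minimal sketch of how a caller might satisfy the new signature, assuming the `Chipset::parse` helper from the AMDGPU Utils header included above; the wrapper function name and the empty-string convention (mirroring the pass option added in Passes.td below) are illustrative:

```cpp
#include "mlir/Conversion/MathToROCDL/MathToROCDL.h"

#include <optional>

// Illustrative helper: pass std::nullopt when no chipset was requested so
// that none of the chipset-dependent patterns are added.
static void addMathToROCDLPatterns(const mlir::LLVMTypeConverter &converter,
                                   mlir::RewritePatternSet &patterns,
                                   llvm::StringRef chipsetName) {
  std::optional<mlir::amdgpu::Chipset> maybeChipset;
  if (!chipsetName.empty()) {
    mlir::FailureOr<mlir::amdgpu::Chipset> parsed =
        mlir::amdgpu::Chipset::parse(chipsetName);
    if (mlir::succeeded(parsed))
      maybeChipset = *parsed;
  }
  mlir::populateMathToROCDLConversionPatterns(converter, patterns,
                                              maybeChipset);
}
```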
diff --git a/mlir/include/mlir/Conversion/Passes.td b/mlir/include/mlir/Conversion/Passes.td
index 25e9d34..70e3e45 100644
--- a/mlir/include/mlir/Conversion/Passes.td
+++ b/mlir/include/mlir/Conversion/Passes.td
@@ -778,6 +778,10 @@ def ConvertMathToROCDL : Pass<"convert-math-to-rocdl", "ModuleOp"> {
let summary = "Convert Math dialect to ROCDL library calls";
let description = [{
This pass converts supported Math ops to ROCDL library calls.
+
+ The chipset option specifies the target AMDGPU architecture. If the chipset
+ is empty, none of the chipset-dependent patterns are added, and the pass
+ will not attempt to parse the chipset.
}];
let dependentDialects = [
"arith::ArithDialect",
@@ -785,6 +789,9 @@ def ConvertMathToROCDL : Pass<"convert-math-to-rocdl", "ModuleOp"> {
"ROCDL::ROCDLDialect",
"vector::VectorDialect",
];
+ let options = [Option<"chipset", "chipset", "std::string",
+ /*default=*/"\"\"",
+ "Chipset that these operations will run on">];
}
//===----------------------------------------------------------------------===//
@@ -800,7 +807,7 @@ def ConvertMathToSPIRVPass : Pass<"convert-math-to-spirv"> {
// MathToXeVM
//===----------------------------------------------------------------------===//
-def ConvertMathToXeVM : Pass<"convert-math-to-xevm", "ModuleOp"> {
+def ConvertMathToXeVM : Pass<"convert-math-to-xevm"> {
let summary =
"Convert (fast) math operations to native XeVM/SPIRV equivalents";
let description = [{
diff --git a/mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td b/mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td
index 8370d35..7184de9 100644
--- a/mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td
+++ b/mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td
@@ -112,6 +112,97 @@ def AMDGPU_ExtPackedFp8Op :
}];
}
+def IsValidBlockSize: AttrConstraint<
+ CPred<"::llvm::is_contained({16, 32}, ::llvm::cast<::mlir::IntegerAttr>($_self).getInt())">,
+ "whose value is 16 or 32">;
+
+def AMDGPU_ScaledExtPacked816Op
+ : AMDGPU_Op<"scaled_ext_packed816", [Pure, AllShapesMatch<["source", "res"]>]>,
+ Arguments<(
+ ins AnyTypeOf<[FixedVectorOfShapeAndType<[8], F4E2M1FN>,
+ FixedVectorOfShapeAndType<[8], F8E4M3FN>,
+ FixedVectorOfShapeAndType<[8], F8E5M2>,
+ FixedVectorOfShapeAndType<[16], F6E2M3FN>,
+ FixedVectorOfShapeAndType<[16], F6E3M2FN>]>:$source,
+ FixedVectorOfShapeAndType<[4], F8E8M0FNU>:$scale,
+ ConfinedAttr<I32Attr, [IsValidBlockSize]>:$blockSize,
+ ConfinedAttr<I32Attr, [IntMinValue<0>, IntMaxValue<1>]>:$firstScaleLane,
+ ConfinedAttr<I32Attr, [IntMinValue<0>, IntMaxValue<2>]>:$firstScaleByte)>,
+ Results<(
+ outs AnyTypeOf<[FixedVectorOfShapeAndType<[8], F32>,
+ FixedVectorOfShapeAndType<[8], F16>,
+ FixedVectorOfShapeAndType<[8], BF16>,
+ FixedVectorOfShapeAndType<[16], F32>,
+ FixedVectorOfShapeAndType<[16], F16>,
+ FixedVectorOfShapeAndType<[16], BF16>]>:$res)> {
+
+ let summary = "Extend a vector of packed floating point values";
+
+ let description = [{
+    The scales applied to the input microfloats are stored in two bytes, which
+    come from the `scale` input provided in the *half* of the wave identified
+ by `firstScaleLane`. The pair of bytes used is selected by
+ `firstScaleByte`. The 16 vectors in consecutive lanes starting from
+ `firstScaleLane` (which we'll call the scale vectors) will be used by both
+    halves of the wave (with lane L reading from the (L % 16)'th scale vector), but
+ each half will use a different byte.
+
+ When the block size is 32, `firstScaleByte` can be either 0 or 2,
+ selecting halves of the scale vectors. Lanes 0-15 will read from
+ `firstScaleByte` and lanes 16-31 will read from `firstScaleByte` + 1.
+ For example:
+ ```mlir
+ // Input: 8-element vector of F8E4M3FN, converting to F32
+ // Lanes 0-15 read from byte 0, lanes 16-31 read from byte 1
+ %result = amdgpu.scaled_ext_packed816 %source scale(%scales)
+ blockSize(32) firstScaleLane(0) firstScaleByte(0)
+ : vector<8xf8E4M3FN>, vector<4xf8E8M0FNU> -> vector<8xf32>
+
+ // Input: 16-element vector of F6E2M3FN, converting to F16
+ // Lanes 0-15 read from byte 2, lanes 16-31 read from byte 3
+ %result = amdgpu.scaled_ext_packed816 %source scale(%scales)
+ blockSize(32) firstScaleLane(1) firstScaleByte(2)
+ : vector<16xf6E2M3FN>, vector<4xf8E8M0FNU> -> vector<16xf16>
+ ```
+
+ However, when the block size is 16, `firstScaleByte` can be 0 or 1.
+ Lanes 0-15 read from the `firstScaleByte`th element of the scale vectors,
+ while lanes 16-31 read from `firstScaleByte` + 2.
+ For example:
+ ```mlir
+ // Input: 8-element vector of F8E5M2, converting to BF16
+ // Lanes 0-15 read from byte 0, lanes 16-31 read from byte 2 (0+2)
+ %result = amdgpu.scaled_ext_packed816 %source scale(%scales)
+ blockSize(16) firstScaleLane(0) firstScaleByte(0)
+ : vector<8xf8E5M2>, vector<4xf8E8M0FNU> -> vector<8xbf16>
+
+ // Input: 16-element vector of F6E3M2FN, converting to F32
+ // Lanes 0-15 read from byte 1, lanes 16-31 read from byte 3 (1+2)
+ %result = amdgpu.scaled_ext_packed816 %source scale(%scales)
+ blockSize(16) firstScaleLane(1) firstScaleByte(1)
+ : vector<16xf6E3M2FN>, vector<4xf8E8M0FNU> -> vector<16xf32>
+ ```
+
+    Note: the layout for the scales generally mirrors the one the WMMA
+    instructions use for matrix scales. These selection operands allow
+    one to choose which portions of the matrix to convert.
+
+ Available on gfx1250+.
+ }];
+
+ let assemblyFormat = [{
+ attr-dict $source
+ `scale` `(` $scale `)`
+ `blockSize` `(` $blockSize `)`
+ `firstScaleLane` `(` $firstScaleLane`)`
+ `firstScaleByte` `(` $firstScaleByte `)`
+ `:` type($source) `,` type($scale) `->` type($res)
+ }];
+
+ let hasVerifier = 1;
+
+}
+
def AMDGPU_ScaledExtPackedOp
: AMDGPU_Op<"scaled_ext_packed", [Pure]>,
Arguments<(
@@ -860,7 +951,7 @@ def AMDGPU_MFMAOp :
based on the provided `m`, `k`, `n`, and `nBlks` attributes, along with the
types of the source and destination arguments.
- For information on the layouts of the input and output matrces (which are stored
+ For information on the layouts of the input and output matrices (which are stored
in `sourceA`, `sourceB`, `destC`, and `destD`), see the CDNA ISA documentation.
The `cbsz`, `abid`, and `blgp` parameters control how the lanes of the wave
diff --git a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
index e52b7d2..12a7935 100644
--- a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
+++ b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
@@ -330,7 +330,6 @@ def AffineForOp : Affine_Op<"for",
Speculation::Speculatability getSpeculatability();
}];
- let hasCanonicalizer = 1;
let hasCustomAssemblyFormat = 1;
let hasFolder = 1;
let hasRegionVerifier = 1;
diff --git a/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td b/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td
index 20c9097..a38cf41 100644
--- a/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td
+++ b/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td
@@ -1229,37 +1229,50 @@ def Arith_ScalingExtFOp
let summary = "Upcasts input floats using provided scales values following "
"OCP MXFP Spec";
let description = [{
- This operation upcasts input floating-point values using provided scale
- values. It expects both scales and the input operand to be of the same shape,
- making the operation elementwise. Scales are usually calculated per block
- following the OCP MXFP spec as described in https://arxiv.org/abs/2310.10537.
-
- If scales are calculated per block where blockSize != 1, then scales may
- require broadcasting to make this operation elementwise. For example, let's
- say the input is of shape `<dim1 x dim2 x ... dimN>`. Given blockSize != 1 and
- assuming quantization happens on the last axis, the input can be reshaped to
- `<dim1 x dim2 x ... (dimN/blockSize) x blockSize>`. Scales will be calculated
- per block on the last axis. Therefore, scales will be of shape
- `<dim1 x dim2 x ... (dimN/blockSize) x 1>`. Scales could also be of some other
- shape as long as it is broadcast compatible with the input, e.g.,
- `<1 x 1 x ... (dimN/blockSize) x 1>`.
-
- In this example, before calling into `arith.scaling_extf`, scales must be
- broadcasted to `<dim1 x dim2 x dim3 ... (dimN/blockSize) x blockSize>`. Note
- that there could be multiple quantization axes. Internally,
- `arith.scaling_extf` would perform the following:
+ This operation upcasts input floating-point values using provided scale
+ values. It expects both scales and the input operand to be of the same shape,
+ making the operation elementwise. Scales are usually calculated per block
+ following the OCP MXFP spec as described in https://arxiv.org/abs/2310.10537.
- ```
- resultTy = get_type(result)
- scaleTy = get_type(scale)
- inputTy = get_type(input)
- scale.exponent = arith.truncf(scale) : scaleTy to f8E8M0
- scale.extf = arith.extf(scale.exponent) : f8E8M0 to resultTy
- input.extf = arith.extf(input) : inputTy to resultTy
- result = arith.mulf(scale.extf, input.extf)
+ If scales are calculated per block where blockSize != 1, then scales may
+ require broadcasting to make this operation elementwise. For example, let's
+ say the input is of shape `<dim1 x dim2 x ... dimN>`. Given blockSize != 1 and
+ assuming quantization happens on the last axis, the input can be reshaped to
+ `<dim1 x dim2 x ... (dimN/blockSize) x blockSize>`. Scales will be calculated
+ per block on the last axis. Therefore, scales will be of shape
+ `<dim1 x dim2 x ... (dimN/blockSize) x 1>`. Scales could also be of some other
+ shape as long as it is broadcast compatible with the input, e.g.,
+ `<1 x 1 x ... (dimN/blockSize) x 1>`.
+
+ In this example, before calling into `arith.scaling_extf`, scales must be
+ broadcasted to `<dim1 x dim2 x dim3 ... (dimN/blockSize) x blockSize>`. Note
+ that there could be multiple quantization axes. Internally,
+ `arith.scaling_extf` would perform the following:
+
+ ```mlir
+ // Cast scale to result type.
+ %0 = arith.truncf %1 : f32 to f8E8M0FNU
+ %1 = arith.extf %0 : f8E8M0FNU to f16
+
+ // Cast input to result type.
+ %2 = arith.extf %3 : f4E2M1FN to f16
+
+ // Perform scaling
+ %3 = arith.mulf %2, %1 : f16
```
It propagates NaN values. Therefore, if either scale or the input element
contains NaN, then the output element value will also be a NaN.
+
+ Example:
+
+ ```mlir
+ // Upcast from f4E2M1FN to f32.
+ %a = arith.scaling_extf %b, %c : f4E2M1FN, f8E8M0FNU to f32
+
+ // Element-wise upcast with broadcast (blockSize = 32).
+ %f = vector.broadcast %g : vector<1xf8E8M0FNU> to vector<32xf8E8M0FNU>
+ %h = arith.scaling_extf %i, %f : vector<32xf4E2M1FN>, vector<32xf8E8M0FNU> to vector<32xbf16>
+ ```
}];
let hasVerifier = 1;
let assemblyFormat =
@@ -1397,14 +1410,27 @@ def Arith_ScalingTruncFOp
that there could be multiple quantization axes. Internally,
`arith.scaling_truncf` would perform the following:
+ ```mlir
+ // Cast scale to input type.
+ %0 = arith.truncf %1 : f32 to f8E8M0FNU
+ %1 = arith.extf %0 : f8E8M0FNU to f16
+
+ // Perform scaling.
+ %3 = arith.divf %2, %1 : f16
+
+ // Cast to result type.
+ %4 = arith.truncf %3 : f16 to f4E2M1FN
```
- scaleTy = get_type(scale)
- inputTy = get_type(input)
- resultTy = get_type(result)
- scale.exponent = arith.truncf(scale) : scaleTy to f8E8M0
- scale.extf = arith.extf(scale.exponent) : f8E8M0 to inputTy
- result = arith.divf(input, scale.extf)
- result.cast = arith.truncf(result, resultTy)
+
+ Example:
+
+ ```mlir
+ // Downcast from f32 to f4E2M1FN.
+ %a = arith.scaling_truncf %b, %c : f32, f8E8M0FNU to f4E2M1FN
+
+ // Element-wise downcast with broadcast (blockSize = 32).
+ %f = vector.broadcast %g : vector<1xf8E8M0FNU> to vector<32xf8E8M0FNU>
+ %h = arith.scaling_truncf %i, %f : vector<32xbf16>, vector<32xf8E8M0FNU> to vector<32xf4E2M1FN>
```
}];
let hasVerifier = 1;
diff --git a/mlir/include/mlir/Dialect/GPU/Pipelines/Passes.h b/mlir/include/mlir/Dialect/GPU/Pipelines/Passes.h
index 035235f..fccb49d 100644
--- a/mlir/include/mlir/Dialect/GPU/Pipelines/Passes.h
+++ b/mlir/include/mlir/Dialect/GPU/Pipelines/Passes.h
@@ -1,4 +1,4 @@
-//===- Passes.h - GPU NVVM pipeline entry points --------------------------===//
+//===- Passes.h - GPU pipeline entry points -------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -60,6 +60,52 @@ struct GPUToNVVMPipelineOptions
llvm::cl::init(false)};
};
+// Options for the gpu to xevm pipeline.
+struct GPUToXeVMPipelineOptions
+ : public PassPipelineOptions<GPUToXeVMPipelineOptions> {
+ PassOptions::Option<std::string> xegpuOpLevel{
+ *this, "xegpu-op-level",
+ llvm::cl::desc("Granularity of XeGPU operations to target: workgroup | "
+ "subgroup | lane"),
+ llvm::cl::init("workgroup")};
+ // General lowering controls.
+ PassOptions::Option<bool> use64bitIndex{
+ *this, "use-64bit-index",
+ llvm::cl::desc("Bitwidth of the index type (host & device)"),
+ llvm::cl::init(true)};
+ PassOptions::Option<bool> kernelBarePtrCallConv{
+ *this, "kernel-bare-ptr-calling-convention",
+ llvm::cl::desc("Use bare pointer calling convention for device kernels"),
+ llvm::cl::init(false)};
+ PassOptions::Option<bool> hostBarePtrCallConv{
+ *this, "host-bare-ptr-calling-convention",
+ llvm::cl::desc("Use bare pointer calling convention for host launches"),
+ llvm::cl::init(false)};
+ PassOptions::Option<std::string> binaryFormat{
+ *this, "binary-format",
+ llvm::cl::desc("Final GPU binary emission format (e.g. fatbin)"),
+ llvm::cl::init("fatbin")};
+ // Options mirroring xevm-attach-target (GpuXeVMAttachTarget).
+ PassOptions::Option<std::string> xevmModuleMatcher{
+ *this, "xevm-module-matcher",
+ llvm::cl::desc("Regex to match gpu.module names for XeVM target attach"),
+ llvm::cl::init("")};
+ PassOptions::Option<std::string> zebinTriple{
+ *this, "zebin-triple", llvm::cl::desc("Target triple for XeVM codegen"),
+ llvm::cl::init("spirv64-unknown-unknown")};
+ PassOptions::Option<std::string> zebinChip{
+ *this, "zebin-chip", llvm::cl::desc("Target chip (e.g. pvc, bmg)"),
+ llvm::cl::init("bmg")};
+ PassOptions::Option<unsigned> optLevel{
+ *this, "opt-level",
+ llvm::cl::desc("Optimization level for attached target/codegen"),
+ llvm::cl::init(2)};
+ PassOptions::Option<std::string> cmdOptions{
+ *this, "igc-cmd-options",
+ llvm::cl::desc("Additional downstream compiler command line options"),
+ llvm::cl::init("")};
+};
+
//===----------------------------------------------------------------------===//
// Building and Registering.
//===----------------------------------------------------------------------===//
@@ -70,8 +116,15 @@ struct GPUToNVVMPipelineOptions
void buildLowerToNVVMPassPipeline(OpPassManager &pm,
const GPUToNVVMPipelineOptions &options);
-/// Register all pipeleines for the `gpu` dialect.
+/// Adds the GPU to XeVM pipeline to the given pass manager. Lowers the main
+/// dialects to XeVM targets, handling GPU code regions first and host code
+/// afterwards.
+void buildLowerToXeVMPassPipeline(OpPassManager &pm,
+ const GPUToXeVMPipelineOptions &options);
+
+/// Register all pipelines for the `gpu` dialect.
void registerGPUToNVVMPipeline();
+void registerGPUToXeVMPipeline();
} // namespace gpu
} // namespace mlir
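A minimal sketch of how these declarations might be used from a tool, under the assumption that the option fields accept assignment as usual for `PassPipelineOptions`; everything outside the declarations above is illustrative:

```cpp
#include "mlir/Dialect/GPU/Pipelines/Passes.h"
#include "mlir/Pass/PassManager.h"

// Illustrative: build the XeVM lowering pipeline into a pass manager with
// explicit options; registerGPUToXeVMPipeline() would instead expose it
// under a textual pipeline name.
void buildExampleXeVMPipeline(mlir::OpPassManager &pm) {
  mlir::gpu::GPUToXeVMPipelineOptions options;
  options.xegpuOpLevel = "subgroup"; // workgroup | subgroup | lane
  options.zebinChip = "pvc";         // target chip (e.g. pvc, bmg)
  mlir::gpu::buildLowerToXeVMPassPipeline(pm, options);
}
```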
diff --git a/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td b/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
index 68f31e6..d2df244 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
@@ -574,6 +574,30 @@ def ROCDL_wmma_f32_16x16x16_fp8_bf8 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x16.fp8_b
def ROCDL_wmma_f32_16x16x16_bf8_bf8 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x16.bf8_bf8", [1]>;
def ROCDL_wmma_f32_16x16x16_bf8_fp8 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x16.bf8_fp8", [1]>;
def ROCDL_wmma_i32_16x16x32_iu4 : ROCDL_Wmma_IntrOp<"wmma.i32.16x16x32.iu4", [1]>;
+// Available from gfx1250
+def ROCDL_wmma_f32_16x16x4_f32 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x4.f32", [1]>;
+def ROCDL_wmma_f32_16x16x32_bf16 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x32.bf16", [1]>;
+def ROCDL_wmma_f32_16x16x32_f16 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x32.f16", [1]>;
+def ROCDL_wmma_f16_16x16x32_f16 : ROCDL_Wmma_IntrOp<"wmma.f16.16x16x32.f16", [1]>;
+def ROCDL_wmma_bf16_16x16x32_bf16 : ROCDL_Wmma_IntrOp<"wmma.bf16.16x16x32.bf16", [1]>;
+def ROCDL_wmma_bf16f32_16x16x32_bf16 : ROCDL_Wmma_IntrOp<"wmma.bf16f32.16x16x32.bf16", [1,5]>;
+def ROCDL_wmma_f32_16x16x64_fp8_fp8 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x64.fp8_fp8", [0]>;
+def ROCDL_wmma_f32_16x16x64_fp8_bf8 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x64.fp8_bf8", [0]>;
+def ROCDL_wmma_f32_16x16x64_bf8_fp8 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x64.bf8_fp8", [0]>;
+def ROCDL_wmma_f32_16x16x64_bf8_bf8 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x64.bf8_bf8", [0]>;
+def ROCDL_wmma_f16_16x16x64_fp8_fp8 : ROCDL_Wmma_IntrOp<"wmma.f16.16x16x64.fp8_fp8", [0]>;
+def ROCDL_wmma_f16_16x16x64_fp8_bf8 : ROCDL_Wmma_IntrOp<"wmma.f16.16x16x64.fp8_bf8", [0]>;
+def ROCDL_wmma_f16_16x16x64_bf8_fp8 : ROCDL_Wmma_IntrOp<"wmma.f16.16x16x64.bf8_fp8", [0]>;
+def ROCDL_wmma_f16_16x16x64_bf8_bf8 : ROCDL_Wmma_IntrOp<"wmma.f16.16x16x64.bf8_bf8", [0]>;
+def ROCDL_wmma_f32_16x16x128_fp8_fp8 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x128.fp8_fp8", [0]>;
+def ROCDL_wmma_f32_16x16x128_fp8_bf8 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x128.fp8_bf8", [0]>;
+def ROCDL_wmma_f32_16x16x128_bf8_fp8 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x128.bf8_fp8", [0]>;
+def ROCDL_wmma_f32_16x16x128_bf8_bf8 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x128.bf8_bf8", [0]>;
+def ROCDL_wmma_f16_16x16x128_fp8_fp8 : ROCDL_Wmma_IntrOp<"wmma.f16.16x16x128.fp8_fp8", [0]>;
+def ROCDL_wmma_f16_16x16x128_fp8_bf8 : ROCDL_Wmma_IntrOp<"wmma.f16.16x16x128.fp8_bf8", [0]>;
+def ROCDL_wmma_f16_16x16x128_bf8_fp8 : ROCDL_Wmma_IntrOp<"wmma.f16.16x16x128.bf8_fp8", [0]>;
+def ROCDL_wmma_f16_16x16x128_bf8_bf8 : ROCDL_Wmma_IntrOp<"wmma.f16.16x16x128.bf8_bf8", [0]>;
+def ROCDL_wmma_i32_16x16x64_iu8 : ROCDL_Wmma_IntrOp<"wmma.i32.16x16x64.iu8", [1]>;
//===---------------------------------------------------------------------===//
// LDS transpose intrinsics (available in GFX950)
@@ -1143,6 +1167,7 @@ foreach smallT = [
ScaleArgInfo<ROCDL_V16BF16Type, "Bf16">,
ScaleArgInfo<ROCDL_V16F32Type, "F32">,
] in {
+ // Up-scaling
def ROCDL_CvtPkScalePk16 # largeT.nameForOp # smallT.nameForOp # Op :
ROCDL_ConcreteNonMemIntrOp<"cvt.scale.pk16." # largeT.name # "." # smallT.name,
[Pure], 1, [2], ["scaleSel"]>,
@@ -1158,6 +1183,42 @@ foreach smallT = [
}];
}
+
+ // Down-scaling
+ def ROCDL_CvtScaleF32Pk16 # smallT.nameForOp # largeT.nameForOp # Op :
+ ROCDL_ConcreteNonMemIntrOp<"cvt.scalef32.pk16." # smallT.name # "." # largeT.name,
+ [Pure], 1>,
+ Arguments<(ins largeT.type:$src, F32:$scale)> {
+ let results = (outs smallT.type:$res);
+ let summary = "Scale and convert packed "
+ # largeT.name # " to packed " # smallT.name ;
+ let description = [{
+ Convert 8 packed }] # largeT.name # [{ values to packed }]
+ # smallT.name # [{, multiplying by the exponent part of `scale`
+ before doing so. This op is for gfx1250+ arch.
+ }];
+ let assemblyFormat = [{
+ attr-dict $src `,` $scale `:` type($res)
+ }];
+ }
+
+ def ROCDL_CvtScaleF32SrPk16 # smallT.nameForOp # largeT.nameForOp # Op :
+ ROCDL_ConcreteNonMemIntrOp<"cvt.scalef32.sr.pk16." # smallT.name # "." # largeT.name,
+ [Pure], 1>,
+ Arguments<(ins largeT.type:$src, I32:$seed, F32:$scale)> {
+ let results = (outs smallT.type:$res);
+ let summary = "Scale and convert packed "
+ # largeT.name # " to packed " # smallT.name # " with stochastic rounding";
+ let description = [{
+ Convert 8 packed }] # largeT.name # [{ values to packed }]
+ # smallT.name # [{, multiplying by the exponent part of `scale`
+      before doing so and applying stochastic rounding. This op is for gfx1250+ arch.
+ }];
+ let assemblyFormat = [{
+ attr-dict $src `,` $seed `,` $scale `:` type($res)
+ }];
+ }
+
} // foreach largeT
} // foreach smallTOp
diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
index ae7a085..c89fc59 100644
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
@@ -25,7 +25,6 @@
#include "mlir/Interfaces/TilingInterface.h"
#include "mlir/Transforms/DialectConversion.h"
#include "llvm/ADT/SmallBitVector.h"
-#include "llvm/ADT/SmallSet.h"
namespace mlir {
namespace bufferization {
@@ -621,35 +620,43 @@ LogicalResult rewriteAsPaddedOp(RewriterBase &rewriter, LinalgOp opToPad,
/// In the future, more general interfaces can be devised to encode similar
/// shape evolutions and map between an op and its operands.
SmallVector<OpFoldResult>
-computePaddedShape(RewriterBase &rewriter, TypedValue<RankedTensorType> v,
+computePaddedShape(OpBuilder &, TypedValue<RankedTensorType> v,
AffineMap indexingMap, ArrayRef<OpFoldResult> indexingSizes,
const PadTilingInterfaceOptions &options);
using PadSizeComputationFunction =
std::function<FailureOr<SmallVector<OpFoldResult>>(
- RewriterBase &, OpOperand &, ArrayRef<Range>,
+ OpBuilder &, OpOperand &, ArrayRef<Range>,
const PadTilingInterfaceOptions &)>;
/// Specific helper for Linalg ops.
-FailureOr<SmallVector<OpFoldResult>> computeIndexingMapOpInterfacePaddedShape(
- RewriterBase &rewriter, OpOperand &operandToPad,
- ArrayRef<Range> iterationDomain, const PadTilingInterfaceOptions &options);
+FailureOr<SmallVector<OpFoldResult>>
+computeIndexingMapOpInterfacePaddedShape(OpBuilder &, OpOperand &operandToPad,
+ ArrayRef<Range> iterationDomain,
+ const PadTilingInterfaceOptions &);
+
+/// Operations and values created in the process of padding a TilingInterface
+/// operation.
+struct PadTilingInterfaceResult {
+ /// The operands of the padded op.
+ SmallVector<tensor::PadOp> padOps;
+ /// The padded op, a clone of `toPad` with padded operands.
+ TilingInterface paddedOp;
+ /// Slices of the padded op's results, same types as `toPad`.
+ SmallVector<Value> replacements;
+};
-/// Pad the iterator dimensions `options.paddingDimensions` of `opToPad`.
-///
+/// Pad the iterator dimensions of `toPad`.
/// * "options.paddingSizes" indicates that each padding dimension should be
/// padded to the specified padding size.
/// * "options.padToMultipleOf" indicates that the paddingSizes should be
// interpreted as the bounding box (dynamic) value to pad to.
/// * Use "options.paddingValues" to set the padding value of the created
// tensor::PadOp.
-/// * The tensor::PadOp is returned on success.
-
-FailureOr<TilingInterface>
-rewriteAsPaddedOp(RewriterBase &rewriter, TilingInterface opToPad,
- const PadTilingInterfaceOptions &constOptions,
- SmallVector<tensor::PadOp> &padOps,
- const PadSizeComputationFunction &computePaddingSizeFun =
+FailureOr<PadTilingInterfaceResult>
+rewriteAsPaddedOp(OpBuilder &, TilingInterface toPad,
+ PadTilingInterfaceOptions options,
+ const PadSizeComputationFunction & =
&computeIndexingMapOpInterfacePaddedShape);
namespace detail {
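A minimal sketch of a call site for the new `rewriteAsPaddedOp` interface above (the helper name and error handling are illustrative; a `RewriterBase` is accepted since it derives from `OpBuilder`):

```cpp
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"

// Illustrative call site: pad `toPad`, then replace it with slices of the
// padded op's results on success.
static mlir::LogicalResult
padAndReplace(mlir::RewriterBase &rewriter, mlir::TilingInterface toPad,
              mlir::linalg::PadTilingInterfaceOptions options) {
  mlir::FailureOr<mlir::linalg::PadTilingInterfaceResult> padded =
      mlir::linalg::rewriteAsPaddedOp(rewriter, toPad, options);
  if (mlir::failed(padded))
    return mlir::failure();
  rewriter.replaceOp(toPad, padded->replacements);
  return mlir::success();
}
```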
diff --git a/mlir/include/mlir/Dialect/SMT/IR/SMTOps.td b/mlir/include/mlir/Dialect/SMT/IR/SMTOps.td
index 3143ab7..99b22e5 100644
--- a/mlir/include/mlir/Dialect/SMT/IR/SMTOps.td
+++ b/mlir/include/mlir/Dialect/SMT/IR/SMTOps.td
@@ -220,8 +220,6 @@ def YieldOp : SMTOp<"yield", [
Pure,
Terminator,
ReturnLike,
- ParentOneOf<["smt::SolverOp", "smt::CheckOp",
- "smt::ForallOp", "smt::ExistsOp"]>,
]> {
let summary = "terminator operation for various regions of SMT operations";
let arguments = (ins Variadic<AnyType>:$values);
diff --git a/mlir/include/mlir/Dialect/Shard/IR/ShardOps.td b/mlir/include/mlir/Dialect/Shard/IR/ShardOps.td
index b9d7163..5e68f75e 100644
--- a/mlir/include/mlir/Dialect/Shard/IR/ShardOps.td
+++ b/mlir/include/mlir/Dialect/Shard/IR/ShardOps.td
@@ -494,7 +494,9 @@ def Shard_AllGatherOp : Shard_CollectiveCommunicationOpBase<"all_gather", [
]> {
let summary = "All-gather over a device grid.";
let description = [{
- Gathers along the `gather_axis` tensor axis.
+ Concatenates all tensor slices from a device group defined by `grid_axes` along
+ the tensor dimension `gather_axis` and replicates the result across all devices
+ in the group.
Example:
```mlir
@@ -546,10 +548,13 @@ def Shard_AllReduceOp : Shard_CollectiveCommunicationOpBase<"all_reduce", [
SameOperandsAndResultShape]> {
let summary = "All-reduce over a device grid.";
let description = [{
- The accumulation element type is specified by the result type and
- it does not need to match the input element type.
- The input element is converted to the result element type before
- performing the reduction.
+ Reduces the input tensor across all devices within the groups defined by
+ `grid_axes`, using the specified reduction method. The operation performs an
+ element-wise reduction over the tensor slices from all devices in each group.
+ Each device in a group receives a replicated copy of the reduction result.
+ The accumulation element type is determined by the result type and does not
+ need to match the input element type. Before performing the reduction, each
+ input element is converted to the result element type.
Attributes:
`reduction`: Indicates the reduction method.
@@ -583,13 +588,15 @@ def Shard_AllSliceOp : Shard_CollectiveCommunicationOpBase<"all_slice", [
SameOperandsAndResultElementType,
SameOperandsAndResultRank
]> {
- let summary = "All-slice over a device grid. This is the inverse of all-gather.";
+ let summary = "All-slice over a device grid.";
let description = [{
- Slice along the `slice_axis` tensor axis.
- This operation can be thought of as the inverse of all-gather.
- Technically, it is not required that all processes have the same input tensor.
- Each process will slice a piece of its local tensor based on its in-group device index.
- The operation does not communicate data between devices.
+ Within each device group defined by `grid_axes`, slices the input tensor along
+ the `slice_axis` dimension. It can be viewed as the inverse of an all-gather if
+ the input data is replicated along the `slice_axis`.
+ Each process simply crops its local data to the slice corresponding to its
+ in-group device index.
+    Note: `AllSliceOp` does not involve any communication between devices, and
+    devices within a group are not required to have replicated input data.
Example:
```mlir
@@ -610,7 +617,7 @@ def Shard_AllSliceOp : Shard_CollectiveCommunicationOpBase<"all_slice", [
```
Result:
```
- gather tensor
+ slice tensor
axis 1
------------>
+-------+-------+
@@ -646,8 +653,10 @@ def Shard_AllToAllOp : Shard_CollectiveCommunicationOpBase<"all_to_all", [
SameOperandsAndResultRank]> {
let summary = "All-to-all over a device grid.";
let description = [{
- Performs an all-to-all on tensor pieces split along `split_axis`.
- The resulting pieces are concatenated along `concat_axis` on ech device.
+    Each participant logically splits its input along `split_axis`,
+    then scatters the resulting pieces across the group defined by `grid_axes`.
+    After receiving data pieces from other participants' scatters,
+    it concatenates them along `concat_axis` to produce the final result.
Example:
```
@@ -702,10 +711,9 @@ def Shard_BroadcastOp : Shard_CollectiveCommunicationOpBase<"broadcast", [
]> {
let summary = "Broadcast over a device grid.";
let description = [{
- Broadcast the tensor on `root` to all devices in each respective group.
- The operation broadcasts along grid axes `grid_axes`.
- The `root` device specifies the in-group multi-index that is broadcast to
- all other devices in the group.
+ Copies the input tensor on `root` to all devices in each group defined by
+ `grid_axes`. The `root` device is defined by its in-group multi-index.
+ The contents of input tensors on non-root devices are ignored.
Example:
```
@@ -722,7 +730,7 @@ def Shard_BroadcastOp : Shard_CollectiveCommunicationOpBase<"broadcast", [
+-------+-------+ | broadcast
device (0, 0) -> | 1 2 | 3 4 | <- device (0, 1) | along axis 0
+-------+-------+ ↓
- device (1, 0) -> | | | <- device (1, 1)
+ device (1, 0) -> | * * | * * | <- device (1, 1)
+-------+-------+
```
@@ -758,11 +766,10 @@ def Shard_GatherOp : Shard_CollectiveCommunicationOpBase<"gather", [
]> {
let summary = "Gather over a device grid.";
let description = [{
- Gathers on device `root` along the `gather_axis` tensor axis.
- `root` specifies the coordinates of a device along `grid_axes`.
- It uniquely identifies the root device for each device group.
- The result tensor on non-root devices is undefined.
- Using it will result in undefined behavior.
+ Concatenates all tensor slices from a device group defined by `grid_axes` along
+ the tensor dimension `gather_axis` and returns the resulting tensor on each
+ `root` device. The result on all other (non-root) devices is undefined.
+ The `root` device is defined by its in-group multi-index.
Example:
```mlir
@@ -821,7 +828,9 @@ def Shard_RecvOp : Shard_CollectiveCommunicationOpBase<"recv", [
]> {
let summary = "Send over a device grid.";
let description = [{
- Receive from a device within a device group.
+    Receive a tensor from the device `source`, which is defined by its in-group
+    multi-index. The groups are defined by `grid_axes`.
+    The content of the input tensor is ignored.
}];
let arguments = !con(commonArgs, (ins
AnyNon0RankedTensor:$input,
@@ -845,13 +854,15 @@ def Shard_ReduceOp : Shard_CollectiveCommunicationOpBase<"reduce", [
]> {
let summary = "Reduce over a device grid.";
let description = [{
- Reduces on device `root` within each device group.
- `root` specifies the coordinates of a device along `grid_axes`.
- It uniquely identifies the root device within its device group.
- The accumulation element type is specified by the result type and
- it does not need to match the input element type.
- The input element is converted to the result element type before
- performing the reduction.
+ Reduces the input tensor across all devices within the groups defined by
+ `grid_axes`, using the specified reduction method. The operation performs an
+ element-wise reduction over the tensor slices from all devices in each group.
+ The reduction result will be returned on the `root` device of each group.
+ It is undefined on all other (non-root) devices.
+ The `root` device is defined by its in-group multi-index.
+ The accumulation element type is determined by the result type and does not
+ need to match the input element type. Before performing the reduction, each
+ input element is converted to the result element type.
Attributes:
`reduction`: Indicates the reduction method.
@@ -886,16 +897,18 @@ def Shard_ReduceScatterOp : Shard_CollectiveCommunicationOpBase<"reduce_scatter"
SameOperandsAndResultRank]> {
let summary = "Reduce-scatter over a device grid.";
let description = [{
- After the reduction, the result is scattered within each device group.
- The tensor is split along `scatter_axis` and the pieces distributed
- across the device group.
+ Reduces the input tensor across all devices within the groups defined by
+ `grid_axes` using the specified reduction method. The reduction is performed
+ element-wise across the tensor pieces from all devices in the group.
+ After reduction, the reduction result is scattered (split and distributed)
+ across the device group along `scatter_axis`.
Example:
```
shard.grid @grid0(shape = 2x2)
...
%1 = shard.reduce_scatter %0 on @grid0 grid_axes = [1]
reduction = <max> scatter_axis = 0
- : tensor<3x4xf32> -> tensor<1x4xf64>
+ : tensor<2x2xf32> -> tensor<1x2xf64>
```
Input:
```
@@ -916,13 +929,13 @@ def Shard_ReduceScatterOp : Shard_CollectiveCommunicationOpBase<"reduce_scatter"
Result:
```
+-------+
- | 6 8 | <- devices (0, 0)
+ | 5 6 | <- devices (0, 0)
+-------+
- | 10 12 | <- devices (0, 1)
+ | 7 8 | <- devices (0, 1)
+-------+
- | 22 24 | <- devices (1, 0)
+ | 13 14 | <- devices (1, 0)
+-------+
- | 26 28 | <- devices (1, 1)
+ | 15 16 | <- devices (1, 1)
+-------+
```
}];
@@ -950,8 +963,10 @@ def Shard_ScatterOp : Shard_CollectiveCommunicationOpBase<"scatter", [
]> {
let summary = "Scatter over a device grid.";
let description = [{
- For each device group split the input tensor on the `root` device along
- axis `scatter_axis` and scatter the parts across the group devices.
+ For each device group defined by `grid_axes`, the input tensor on the `root`
+ device is split along axis `scatter_axis` and distributed across the group.
+ The content of the input on all other (non-root) devices is ignored.
+ The `root` device is defined by its in-group multi-index.
Example:
```
@@ -968,8 +983,8 @@ def Shard_ScatterOp : Shard_CollectiveCommunicationOpBase<"scatter", [
(0, 1)
↓
+-------+-------+ | scatter tensor
- device (0, 0) -> | | | | axis 0
- | | | ↓
+ device (0, 0) -> | * * | * * | | axis 0
+ | * * | * * | ↓
+-------+-------+
device (1, 0) -> | 1 2 | 5 6 |
| 3 4 | 7 8 |
@@ -1018,7 +1033,8 @@ def Shard_SendOp : Shard_CollectiveCommunicationOpBase<"send", [
]> {
let summary = "Send over a device grid.";
let description = [{
- Send from one device to another within a device group.
+    Send the input tensor to the device `destination`, which is defined by its in-group
+ multi-index. The groups are defined by `grid_axes`.
}];
let arguments = !con(commonArgs, (ins
AnyNon0RankedTensor:$input,
@@ -1043,12 +1059,11 @@ def Shard_ShiftOp : Shard_CollectiveCommunicationOpBase<"shift", [
]> {
let summary = "Shift over a device grid.";
let description = [{
- Within each device group shift along grid axis `shift_axis` by an offset
- `offset`.
- The result on devices that do not have a corresponding source is undefined.
- `shift_axis` must be one of `grid_axes`.
- If the `rotate` attribute is present,
- instead of a shift a rotation is done.
+ Within each device group defined by `grid_axes`, shifts input tensors along the
+ device grid's axis `shift_axis` by the specified offset. The `shift_axis` must
+ be one of the `grid_axes`. If the `rotate` attribute is set, the shift is circular.
+ That is, the offset wraps around according to the group size along `shift_axis`.
+ Otherwise, the results on devices without a corresponding source are undefined.
Example:
```
diff --git a/mlir/include/mlir/Dialect/Transform/SMTExtension/SMTExtensionOps.h b/mlir/include/mlir/Dialect/Transform/SMTExtension/SMTExtensionOps.h
index fc69b03..f6353a9 100644
--- a/mlir/include/mlir/Dialect/Transform/SMTExtension/SMTExtensionOps.h
+++ b/mlir/include/mlir/Dialect/Transform/SMTExtension/SMTExtensionOps.h
@@ -10,6 +10,7 @@
#define MLIR_DIALECT_TRANSFORM_SMTEXTENSION_SMTEXTENSIONOPS_H
#include "mlir/Bytecode/BytecodeOpInterface.h"
+#include "mlir/Dialect/SMT/IR/SMTOps.h"
#include "mlir/Dialect/Transform/IR/TransformDialect.h"
#include "mlir/Dialect/Transform/Interfaces/TransformInterfaces.h"
#include "mlir/IR/OpDefinition.h"
diff --git a/mlir/include/mlir/Dialect/Transform/SMTExtension/SMTExtensionOps.td b/mlir/include/mlir/Dialect/Transform/SMTExtension/SMTExtensionOps.td
index b987cb3..9d9783a 100644
--- a/mlir/include/mlir/Dialect/Transform/SMTExtension/SMTExtensionOps.td
+++ b/mlir/include/mlir/Dialect/Transform/SMTExtension/SMTExtensionOps.td
@@ -16,7 +16,7 @@ include "mlir/Interfaces/SideEffectInterfaces.td"
def ConstrainParamsOp : Op<Transform_Dialect, "smt.constrain_params", [
DeclareOpInterfaceMethods<TransformOpInterface>,
DeclareOpInterfaceMethods<MemoryEffectsOpInterface>,
- NoTerminator
+ SingleBlockImplicitTerminator<"::mlir::smt::YieldOp">
]> {
let cppNamespace = [{ mlir::transform::smt }];
@@ -24,14 +24,20 @@ def ConstrainParamsOp : Op<Transform_Dialect, "smt.constrain_params", [
let description = [{
Allows expressing constraints on params using the SMT dialect.
- Each Transform dialect param provided as an operand has a corresponding
+ Each Transform-dialect param provided as an operand has a corresponding
argument of SMT-type in the region. The SMT-Dialect ops in the region use
- these arguments as operands.
+ these params-as-SMT-vars as operands, thereby expressing relevant
+ constraints on their allowed values.
+
+ Computations w.r.t. passed-in params can also be expressed through the
+ region's SMT-ops. Namely, the constraints express relationships to other
+ SMT-variables which can then be yielded from the region (with `smt.yield`).
The semantics of this op is that all the ops in the region together express
a constraint on the params-interpreted-as-smt-vars. The op fails in case the
expressed constraint is not satisfiable per SMTLIB semantics. Otherwise the
- op succeeds.
+ op succeeds and any one satisfying assignment is used to map the
+ SMT-variables yielded in the region to `transform.param`s.
---
@@ -42,9 +48,10 @@ def ConstrainParamsOp : Op<Transform_Dialect, "smt.constrain_params", [
}];
let arguments = (ins Variadic<TransformParamTypeInterface>:$params);
+ let results = (outs Variadic<TransformParamTypeInterface>:$results);
let regions = (region SizedRegion<1>:$body);
let assemblyFormat =
- "`(` $params `)` attr-dict `:` type(operands) $body";
+ "`(` $params `)` attr-dict `:` functional-type(operands, results) $body";
let hasVerifier = 1;
}
diff --git a/mlir/include/mlir/Dialect/WasmSSA/IR/WasmSSAOps.td b/mlir/include/mlir/Dialect/WasmSSA/IR/WasmSSAOps.td
index b80ee2c..e9425e8 100644
--- a/mlir/include/mlir/Dialect/WasmSSA/IR/WasmSSAOps.td
+++ b/mlir/include/mlir/Dialect/WasmSSA/IR/WasmSSAOps.td
@@ -43,9 +43,41 @@ class WasmSSA_BlockLikeOp<string mnemonic, string summaryStr> :
let assemblyFormat = "(`(`$inputs^`)` `:` type($inputs))? attr-dict `:` $body `>` $target";
}
-def WasmSSA_BlockOp : WasmSSA_BlockLikeOp<"block", "Create a nesting level"> {}
+def WasmSSA_BlockOp : WasmSSA_BlockLikeOp<
+ "block",
+ "Create a nesting level with a label at its exit."> {
+ let description = [{
+ Defines a Wasm block, creating a new nested scope.
+ A block contains a body region and an optional list of input values.
+ Control can enter the block and later branch out to the block target.
+ Example:
+
+ ```mlir
+
+ wasmssa.block {
+
+ // instructions
+
+    } > ^successor
+    ```
+  }];
+}
+
+def WasmSSA_LoopOp : WasmSSA_BlockLikeOp<
+ "loop",
+ "Create a nesting level that define its entry as jump target."> {
+ let description = [{
+ Represents a Wasm loop construct. This defines a nesting level with
+ a label at the entry of the region.
-def WasmSSA_LoopOp : WasmSSA_BlockLikeOp<"loop", "Create a nesting level similar to Block Op, except that it has itself as a successor."> {}
+ Example:
+
+ ```mlir
+
+ wasmssa.loop {
+
+    } > ^successor
+    ```
+  }];
+}
def WasmSSA_BlockReturnOp : WasmSSA_Op<"block_return", [Terminator,
DeclareOpInterfaceMethods<LabelBranchingOpInterface>]> {
@@ -55,9 +87,16 @@ def WasmSSA_BlockReturnOp : WasmSSA_Op<"block_return", [Terminator,
::mlir::Block* getTarget();
}];
let description = [{
- Marks a return from the current block.
+    Escape from the current nesting level and return control to its successor.
+    Optionally, mark the arguments that should be transferred to the successor block.
- Example:
+    This shouldn't be confused with branch operations that target the label defined
+    by the nesting level operation.
+
+    For instance, a `wasmssa.block_return` in a loop gives control back to the
+    successor of the loop, whereas a `branch` targeting the loop flows back to the entry block of the loop.
+
+ Example:
```mlir
wasmssa.block_return
@@ -127,12 +166,18 @@ def WasmSSA_FuncOp : WasmSSA_Op<"func", [
- Arguments of the entry block of type `!wasm<local T>`, with T the corresponding type
in the function type.
+    By default, `wasmssa.func` has nested visibility. Functions exported by the module
+    are marked with the `exported` attribute, which gives them public visibility.
+
Example:
```mlir
- // A simple function with no arguments that returns a float32
+ // Internal function with no arguments that returns a float32
wasmssa.func @my_f32_func() -> f32
+ // Exported function with no arguments that returns a float32
+ wasmssa.func exported @my_f32_func() -> f32
+
// A function that takes a local ref argument
wasmssa.func @i64_wrap(%a: !wasmssa<local ref to i64>) -> i32
```
@@ -141,7 +186,7 @@ def WasmSSA_FuncOp : WasmSSA_Op<"func", [
WasmSSA_FuncTypeAttr: $functionType,
OptionalAttr<DictArrayAttr>:$arg_attrs,
OptionalAttr<DictArrayAttr>:$res_attrs,
- DefaultValuedAttr<StrAttr, "\"nested\"">:$sym_visibility);
+ UnitAttr: $exported);
let regions = (region AnyRegion: $body);
let extraClassDeclaration = [{
@@ -162,6 +207,12 @@ def WasmSSA_FuncOp : WasmSSA_Op<"func", [
/// Returns the result types of this function.
ArrayRef<Type> getResultTypes() { return getFunctionType().getResults(); }
+
+ ::mlir::SymbolTable::Visibility getVisibility() {
+ return getExported() ?
+ ::mlir::SymbolTable::Visibility::Public :
+ ::mlir::SymbolTable::Visibility::Nested;
+ };
}];
let builders = [
@@ -207,8 +258,7 @@ def WasmSSA_FuncImportOp : WasmSSA_Op<"import_func", [
StrAttr: $importName,
WasmSSA_FuncTypeAttr: $type,
OptionalAttr<DictArrayAttr>:$arg_attrs,
- OptionalAttr<DictArrayAttr>:$res_attrs,
- OptionalAttr<StrAttr>:$sym_visibility);
+ OptionalAttr<DictArrayAttr>:$res_attrs);
let extraClassDeclaration = [{
bool isDeclaration() const { return true; }
@@ -221,6 +271,10 @@ def WasmSSA_FuncImportOp : WasmSSA_Op<"import_func", [
::llvm::ArrayRef<Type> getResultTypes() {
return getType().getResults();
}
+
+ ::mlir::SymbolTable::Visibility getVisibility() {
+ return ::mlir::SymbolTable::Visibility::Nested;
+ };
}];
let builders = [
OpBuilder<(ins "StringRef":$symbol,
@@ -238,30 +292,41 @@ def WasmSSA_GlobalOp : WasmSSA_Op<"global", [
let arguments = (ins SymbolNameAttr: $sym_name,
WasmSSA_ValTypeAttr: $type,
UnitAttr: $isMutable,
- OptionalAttr<StrAttr>:$sym_visibility);
+ UnitAttr: $exported);
let description = [{
WebAssembly global variable.
Body contains the initialization instructions for the variable value.
The body must contain only instructions considered `const` in a webassembly context,
such as `wasmssa.const` or `global.get`.
+    By default, `wasmssa.global` has nested visibility. Globals exported by the module
+    are marked with the `exported` attribute, which gives them public visibility.
+
Example:
```mlir
- // Define a global_var, a mutable i32 global variable equal to 10.
- wasmssa.global @global_var i32 mutable nested : {
+ // Define module_global_var, an internal mutable i32 global variable equal to 10.
+ wasmssa.global @module_global_var i32 mutable : {
%[[VAL_0:.*]] = wasmssa.const 10 : i32
wasmssa.return %[[VAL_0]] : i32
}
+
+ // Define global_var, an exported constant i32 global variable equal to 42.
+ wasmssa.global @global_var i32 : {
+ %[[VAL_0:.*]] = wasmssa.const 42 : i32
+ wasmssa.return %[[VAL_0]] : i32
+ }
```
}];
let regions = (region AnyRegion: $initializer);
- let builders = [
- OpBuilder<(ins "StringRef":$symbol,
- "Type": $type,
- "bool": $isMutable)>
- ];
+ let extraClassDeclaration = [{
+ ::mlir::SymbolTable::Visibility getVisibility() {
+ return getExported() ?
+ ::mlir::SymbolTable::Visibility::Public :
+ ::mlir::SymbolTable::Visibility::Nested;
+ };
+ }];
let hasCustomAssemblyFormat = 1;
}
@@ -283,18 +348,14 @@ def WasmSSA_GlobalImportOp : WasmSSA_Op<"import_global", [
StrAttr: $moduleName,
StrAttr: $importName,
WasmSSA_ValTypeAttr: $type,
- UnitAttr: $isMutable,
- OptionalAttr<StrAttr>:$sym_visibility);
+ UnitAttr: $isMutable);
let extraClassDeclaration = [{
bool isDeclaration() const { return true; }
+
+ ::mlir::SymbolTable::Visibility getVisibility() {
+ return ::mlir::SymbolTable::Visibility::Nested;
+ };
}];
- let builders = [
- OpBuilder<(ins "StringRef":$symbol,
- "StringRef":$moduleName,
- "StringRef":$importName,
- "Type": $type,
- "bool": $isMutable)>
- ];
let hasCustomAssemblyFormat = 1;
}
@@ -442,23 +503,33 @@ def WasmSSA_MemOp : WasmSSA_Op<"memory", [Symbol]> {
Define a memory to be used by the program.
Multiple memories can be defined in the same module.
+    By default, `wasmssa.memory` definitions have nested visibility. Memories exported by
+    the module are marked with the `exported` attribute, which gives them public
+    visibility.
+
Example:
```mlir
- // Define the `mem_0` memory with defined bounds of 0 -> 65536
+ // Define the `mem_0` (internal) memory with defined size bounds of [0:65536]
wasmssa.memory @mem_0 !wasmssa<limit[0:65536]>
+
+    // Define the `mem_1` exported memory with a minimum size of 512
+ wasmssa.memory exported @mem_1 !wasmssa<limit[512:]>
```
}];
let arguments = (ins SymbolNameAttr: $sym_name,
WasmSSA_LimitTypeAttr: $limits,
- OptionalAttr<StrAttr>:$sym_visibility);
- let builders = [
- OpBuilder<(ins
- "::llvm::StringRef":$symbol,
- "wasmssa::LimitType":$limit)>
- ];
+ UnitAttr: $exported);
- let assemblyFormat = "$sym_name custom<WasmVisibility>($sym_visibility) $limits attr-dict";
+ let extraClassDeclaration = [{
+ ::mlir::SymbolTable::Visibility getVisibility() {
+ return getExported() ?
+ ::mlir::SymbolTable::Visibility::Public :
+ ::mlir::SymbolTable::Visibility::Nested;
+ };
+ }];
+
+ let assemblyFormat = "(`exported` $exported^)? $sym_name $limits attr-dict";
}
def WasmSSA_MemImportOp : WasmSSA_Op<"import_mem", [Symbol, ImportOpInterface]> {
@@ -476,16 +547,13 @@ def WasmSSA_MemImportOp : WasmSSA_Op<"import_mem", [Symbol, ImportOpInterface]>
let arguments = (ins SymbolNameAttr: $sym_name,
StrAttr: $moduleName,
StrAttr: $importName,
- WasmSSA_LimitTypeAttr: $limits,
- OptionalAttr<StrAttr>:$sym_visibility);
+ WasmSSA_LimitTypeAttr: $limits);
let extraClassDeclaration = [{
- bool isDeclaration() const { return true; }
+ bool isDeclaration() const { return true; }
+ ::mlir::SymbolTable::Visibility getVisibility() {
+ return ::mlir::SymbolTable::Visibility::Nested;
+ };
}];
- let builders = [OpBuilder<(ins
- "::llvm::StringRef":$symbol,
- "::llvm::StringRef":$moduleName,
- "::llvm::StringRef":$importName,
- "wasmssa::LimitType":$limits)>];
let assemblyFormat = "$importName `from` $moduleName `as` $sym_name attr-dict";
}
@@ -493,11 +561,15 @@ def WasmSSA_TableOp : WasmSSA_Op<"table", [Symbol]> {
let summary= "WebAssembly table value";
let arguments = (ins SymbolNameAttr: $sym_name,
WasmSSA_TableTypeAttr: $type,
- OptionalAttr<StrAttr>:$sym_visibility);
- let builders = [OpBuilder<(ins
- "::llvm::StringRef":$symbol,
- "wasmssa::TableType":$type)>];
- let assemblyFormat = "$sym_name custom<WasmVisibility>($sym_visibility) $type attr-dict";
+ UnitAttr: $exported);
+ let extraClassDeclaration = [{
+ ::mlir::SymbolTable::Visibility getVisibility() {
+ return getExported() ?
+ ::mlir::SymbolTable::Visibility::Public :
+ ::mlir::SymbolTable::Visibility::Nested;
+ };
+ }];
+ let assemblyFormat = "(`exported` $exported^)? $sym_name $type attr-dict";
}
def WasmSSA_TableImportOp : WasmSSA_Op<"import_table", [Symbol, ImportOpInterface]> {
@@ -515,17 +587,14 @@ def WasmSSA_TableImportOp : WasmSSA_Op<"import_table", [Symbol, ImportOpInterfac
let arguments = (ins SymbolNameAttr: $sym_name,
StrAttr: $moduleName,
StrAttr: $importName,
- WasmSSA_TableTypeAttr: $type,
- OptionalAttr<StrAttr>:$sym_visibility);
+ WasmSSA_TableTypeAttr: $type);
let extraClassDeclaration = [{
bool isDeclaration() const { return true; }
+ ::mlir::SymbolTable::Visibility getVisibility() {
+ return ::mlir::SymbolTable::Visibility::Nested;
+ };
}];
let assemblyFormat = "$importName `from` $moduleName `as` $sym_name attr-dict";
- let builders = [OpBuilder<(ins
- "::llvm::StringRef":$symbol,
- "::llvm::StringRef":$moduleName,
- "::llvm::StringRef":$importName,
- "wasmssa::TableType":$type)>];
}
def WasmSSA_ReturnOp : WasmSSA_Op<"return", [Terminator]> {
diff --git a/mlir/include/mlir/IR/CommonTypeConstraints.td b/mlir/include/mlir/IR/CommonTypeConstraints.td
index 6b4e3dd..8427ba5 100644
--- a/mlir/include/mlir/IR/CommonTypeConstraints.td
+++ b/mlir/include/mlir/IR/CommonTypeConstraints.td
@@ -623,6 +623,14 @@ class VectorOfLengthAndType<list<int> allowedLengths,
VectorOfNonZeroRankOf<allowedTypes>.summary # VectorOfLength<allowedLengths>.summary,
"::mlir::VectorType">;
+class FixedVectorOfShapeAndType<list<int> shape, Type elType>: ShapedContainerType<
+ [elType],
+ And<[IsVectorOfShape<shape>, IsFixedVectorOfAnyRankTypePred]>,
+ "vector<" # !interleave(shape, "x") # "x" # elType # ">",
+ "::mlir::VectorType">,
+ BuildableType<"::mlir::VectorType::get({" # !interleave(shape, " ,") # "} , " # elType.builderCall # " );">;
+
+
// Any fixed-length vector where the number of elements is from the given
// `allowedLengths` list and the type is from the given `allowedTypes` list
class FixedVectorOfLengthAndType<list<int> allowedLengths,
diff --git a/mlir/include/mlir/Target/Wasm/WasmBinaryEncoding.h b/mlir/include/mlir/Target/Wasm/WasmBinaryEncoding.h
index 21adde8..cd9ef5b 100644
--- a/mlir/include/mlir/Target/Wasm/WasmBinaryEncoding.h
+++ b/mlir/include/mlir/Target/Wasm/WasmBinaryEncoding.h
@@ -19,6 +19,14 @@ namespace mlir {
struct WasmBinaryEncoding {
/// Byte encodings for Wasm instructions.
struct OpCode {
+ // Control instructions.
+ static constexpr std::byte block{0x02};
+ static constexpr std::byte loop{0x03};
+ static constexpr std::byte ifOpCode{0x04};
+ static constexpr std::byte elseOpCode{0x05};
+ static constexpr std::byte branchIf{0x0D};
+ static constexpr std::byte call{0x10};
+
// Locals, globals, constants.
static constexpr std::byte localGet{0x20};
static constexpr std::byte localSet{0x21};
@@ -29,6 +37,42 @@ struct WasmBinaryEncoding {
static constexpr std::byte constFP32{0x43};
static constexpr std::byte constFP64{0x44};
+ // Comparisons.
+ static constexpr std::byte eqzI32{0x45};
+ static constexpr std::byte eqI32{0x46};
+ static constexpr std::byte neI32{0x47};
+ static constexpr std::byte ltSI32{0x48};
+ static constexpr std::byte ltUI32{0x49};
+ static constexpr std::byte gtSI32{0x4A};
+ static constexpr std::byte gtUI32{0x4B};
+ static constexpr std::byte leSI32{0x4C};
+ static constexpr std::byte leUI32{0x4D};
+ static constexpr std::byte geSI32{0x4E};
+ static constexpr std::byte geUI32{0x4F};
+ static constexpr std::byte eqzI64{0x50};
+ static constexpr std::byte eqI64{0x51};
+ static constexpr std::byte neI64{0x52};
+ static constexpr std::byte ltSI64{0x53};
+ static constexpr std::byte ltUI64{0x54};
+ static constexpr std::byte gtSI64{0x55};
+ static constexpr std::byte gtUI64{0x56};
+ static constexpr std::byte leSI64{0x57};
+ static constexpr std::byte leUI64{0x58};
+ static constexpr std::byte geSI64{0x59};
+ static constexpr std::byte geUI64{0x5A};
+ static constexpr std::byte eqF32{0x5B};
+ static constexpr std::byte neF32{0x5C};
+ static constexpr std::byte ltF32{0x5D};
+ static constexpr std::byte gtF32{0x5E};
+ static constexpr std::byte leF32{0x5F};
+ static constexpr std::byte geF32{0x60};
+ static constexpr std::byte eqF64{0x61};
+ static constexpr std::byte neF64{0x62};
+ static constexpr std::byte ltF64{0x63};
+ static constexpr std::byte gtF64{0x64};
+ static constexpr std::byte leF64{0x65};
+ static constexpr std::byte geF64{0x66};
+
// Numeric operations.
static constexpr std::byte clzI32{0x67};
static constexpr std::byte ctzI32{0x68};
@@ -93,6 +137,33 @@ struct WasmBinaryEncoding {
static constexpr std::byte maxF64{0xA5};
static constexpr std::byte copysignF64{0xA6};
static constexpr std::byte wrap{0xA7};
+
+ // Conversion operations
+ static constexpr std::byte extendS{0xAC};
+ static constexpr std::byte extendU{0xAD};
+ static constexpr std::byte convertSI32F32{0xB2};
+ static constexpr std::byte convertUI32F32{0xB3};
+ static constexpr std::byte convertSI64F32{0xB4};
+ static constexpr std::byte convertUI64F32{0xB5};
+
+ static constexpr std::byte demoteF64ToF32{0xB6};
+
+ static constexpr std::byte convertSI32F64{0xB7};
+ static constexpr std::byte convertUI32F64{0xB8};
+ static constexpr std::byte convertSI64F64{0xB9};
+ static constexpr std::byte convertUI64F64{0xBA};
+
+ static constexpr std::byte promoteF32ToF64{0xBB};
+ static constexpr std::byte reinterpretF32AsI32{0xBC};
+ static constexpr std::byte reinterpretF64AsI64{0xBD};
+ static constexpr std::byte reinterpretI32AsF32{0xBE};
+ static constexpr std::byte reinterpretI64AsF64{0xBF};
+
+ static constexpr std::byte extendI328S{0xC0};
+ static constexpr std::byte extendI3216S{0xC1};
+ static constexpr std::byte extendI648S{0xC2};
+ static constexpr std::byte extendI6416S{0xC3};
+ static constexpr std::byte extendI6432S{0xC4};
};
/// Byte encodings of types in Wasm binaries
diff --git a/mlir/lib/Bindings/Python/Rewrite.cpp b/mlir/lib/Bindings/Python/Rewrite.cpp
index 5ddb3fb..0f0ed22 100644
--- a/mlir/lib/Bindings/Python/Rewrite.cpp
+++ b/mlir/lib/Bindings/Python/Rewrite.cpp
@@ -205,7 +205,7 @@ public:
nb::object res = f(opView, PyPatternRewriter(rewriter));
return logicalResultFromObject(res);
};
- MlirRewritePattern pattern = mlirOpRewritePattenCreate(
+ MlirRewritePattern pattern = mlirOpRewritePatternCreate(
rootName, benefit, ctx, callbacks, matchAndRewrite.ptr(),
/* nGeneratedNames */ 0,
/* generatedNames */ nullptr);
diff --git a/mlir/lib/CAPI/Transforms/Rewrite.cpp b/mlir/lib/CAPI/Transforms/Rewrite.cpp
index 46c329d..41ceb15 100644
--- a/mlir/lib/CAPI/Transforms/Rewrite.cpp
+++ b/mlir/lib/CAPI/Transforms/Rewrite.cpp
@@ -341,7 +341,7 @@ private:
} // namespace mlir
-MlirRewritePattern mlirOpRewritePattenCreate(
+MlirRewritePattern mlirOpRewritePatternCreate(
MlirStringRef rootName, unsigned benefit, MlirContext context,
MlirRewritePatternCallbacks callbacks, void *userData,
size_t nGeneratedNames, MlirStringRef *generatedNames) {
diff --git a/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp b/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
index b215211..c03f3a5 100644
--- a/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
+++ b/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
@@ -484,5 +484,5 @@ void mlir::populateGpuToROCDLConversionPatterns(
GPUSubgroupBroadcastOpToROCDL>(converter);
patterns.add<GPUSubgroupSizeOpToROCDL>(converter, chipset);
- populateMathToROCDLConversionPatterns(converter, patterns);
+ populateMathToROCDLConversionPatterns(converter, patterns, chipset);
}
diff --git a/mlir/lib/Conversion/MathToROCDL/CMakeLists.txt b/mlir/lib/Conversion/MathToROCDL/CMakeLists.txt
index 2771955a..8cc3fde 100644
--- a/mlir/lib/Conversion/MathToROCDL/CMakeLists.txt
+++ b/mlir/lib/Conversion/MathToROCDL/CMakeLists.txt
@@ -11,6 +11,7 @@ add_mlir_conversion_library(MLIRMathToROCDL
Core
LINK_LIBS PUBLIC
+ MLIRAMDGPUUtils
MLIRDialectUtils
MLIRFuncDialect
MLIRGPUToGPURuntimeTransforms
diff --git a/mlir/lib/Conversion/MathToROCDL/MathToROCDL.cpp b/mlir/lib/Conversion/MathToROCDL/MathToROCDL.cpp
index df219f3..a2dfc12 100644
--- a/mlir/lib/Conversion/MathToROCDL/MathToROCDL.cpp
+++ b/mlir/lib/Conversion/MathToROCDL/MathToROCDL.cpp
@@ -10,6 +10,8 @@
#include "mlir/Conversion/GPUCommon/GPUCommonPass.h"
#include "mlir/Conversion/LLVMCommon/LoweringOptions.h"
#include "mlir/Conversion/LLVMCommon/TypeConverter.h"
+#include "mlir/Conversion/LLVMCommon/VectorPattern.h"
+#include "mlir/Dialect/AMDGPU/Utils/Chipset.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/LLVMIR/ROCDLDialect.h"
@@ -19,6 +21,7 @@
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"
+#include "llvm/Support/DebugLog.h"
#include "../GPUCommon/GPUOpsLowering.h"
#include "../GPUCommon/OpToFuncCallLowering.h"
@@ -42,8 +45,46 @@ static void populateOpPatterns(const LLVMTypeConverter &converter,
f32ApproxFunc, f16Func);
}
+struct ClampFOpConversion final
+ : public ConvertOpToLLVMPattern<math::ClampFOp> {
+ using ConvertOpToLLVMPattern::ConvertOpToLLVMPattern;
+
+ LogicalResult
+ matchAndRewrite(math::ClampFOp op, OpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const override {
+ // Only f16 and f32 types are supported by fmed3
+ Type opTy = op.getType();
+ Type resultType = getTypeConverter()->convertType(opTy);
+
+ if (auto vectorType = dyn_cast<VectorType>(opTy))
+ opTy = vectorType.getElementType();
+
+ if (!isa<Float16Type, Float32Type>(opTy))
+ return rewriter.notifyMatchFailure(
+ op, "fmed3 only supports f16 and f32 types");
+
+ // Handle multi-dimensional vectors (converted to LLVM arrays)
+ if (auto arrayType = dyn_cast<LLVM::LLVMArrayType>(resultType))
+ return LLVM::detail::handleMultidimensionalVectors(
+ op.getOperation(), adaptor.getOperands(), *getTypeConverter(),
+ [&](Type llvm1DVectorTy, ValueRange operands) -> Value {
+ typename math::ClampFOp::Adaptor adaptor(operands);
+ return ROCDL::FMed3Op::create(rewriter, op.getLoc(), llvm1DVectorTy,
+ adaptor.getValue(), adaptor.getMin(),
+ adaptor.getMax());
+ },
+ rewriter);
+
+ // Handle 1D vectors and scalars directly
+ rewriter.replaceOpWithNewOp<ROCDL::FMed3Op>(op, op.getType(), op.getValue(),
+ op.getMin(), op.getMax());
+ return success();
+ }
+};
+
void mlir::populateMathToROCDLConversionPatterns(
- const LLVMTypeConverter &converter, RewritePatternSet &patterns) {
+ const LLVMTypeConverter &converter, RewritePatternSet &patterns,
+ std::optional<amdgpu::Chipset> chipset) {
// Handled by mathToLLVM: math::AbsIOp
// Handled by mathToLLVM: math::AbsFOp
// Handled by mathToLLVM: math::CopySignOp
@@ -118,15 +159,21 @@ void mlir::populateMathToROCDLConversionPatterns(
// worth creating a separate pass for it.
populateOpPatterns<arith::RemFOp>(converter, patterns, "__ocml_fmod_f32",
"__ocml_fmod_f64", "__ocml_fmod_f16");
+
+ if (chipset.has_value() && chipset->majorVersion >= 9) {
+ patterns.add<ClampFOpConversion>(converter);
+ } else {
+ LDBG() << "Chipset dependent patterns were not added";
+ }
}
-namespace {
-struct ConvertMathToROCDLPass
- : public impl::ConvertMathToROCDLBase<ConvertMathToROCDLPass> {
- ConvertMathToROCDLPass() = default;
+struct ConvertMathToROCDLPass final
+ : impl::ConvertMathToROCDLBase<ConvertMathToROCDLPass> {
+ using impl::ConvertMathToROCDLBase<
+ ConvertMathToROCDLPass>::ConvertMathToROCDLBase;
+
void runOnOperation() override;
};
-} // namespace
void ConvertMathToROCDLPass::runOnOperation() {
auto m = getOperation();
@@ -135,10 +182,21 @@ void ConvertMathToROCDLPass::runOnOperation() {
RewritePatternSet patterns(&getContext());
LowerToLLVMOptions options(ctx, DataLayout(m));
LLVMTypeConverter converter(ctx, options);
- populateMathToROCDLConversionPatterns(converter, patterns);
+
+ FailureOr<amdgpu::Chipset> maybeChipset;
+ if (!chipset.empty()) {
+ maybeChipset = amdgpu::Chipset::parse(chipset);
+ if (failed(maybeChipset))
+ return signalPassFailure();
+ }
+ populateMathToROCDLConversionPatterns(
+ converter, patterns,
+ succeeded(maybeChipset) ? std::optional(*maybeChipset) : std::nullopt);
+
ConversionTarget target(getContext());
- target.addLegalDialect<BuiltinDialect, func::FuncDialect,
- vector::VectorDialect, LLVM::LLVMDialect>();
+ target
+ .addLegalDialect<BuiltinDialect, func::FuncDialect, vector::VectorDialect,
+ LLVM::LLVMDialect, ROCDL::ROCDLDialect>();
target.addIllegalOp<LLVM::CosOp, LLVM::ExpOp, LLVM::Exp2Op, LLVM::FAbsOp,
LLVM::FCeilOp, LLVM::FFloorOp, LLVM::FRemOp, LLVM::LogOp,
LLVM::Log10Op, LLVM::Log2Op, LLVM::PowOp, LLVM::SinOp,
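A minimal sketch of wiring the updated entry point above from other code; the helper name and the "gfx942" chipset string are illustrative assumptions, not part of the patch:

```c++
#include <optional>

#include "mlir/Conversion/LLVMCommon/TypeConverter.h"
#include "mlir/Conversion/MathToROCDL/MathToROCDL.h"
#include "mlir/Dialect/AMDGPU/Utils/Chipset.h"
#include "mlir/IR/PatternMatch.h"

// Registers the MathToROCDL patterns; on gfx9+ chipsets this also includes the
// chipset-gated math.clampf -> rocdl.fmed3 rewrite added above.
static void addMathToROCDLPatterns(const mlir::LLVMTypeConverter &converter,
                                   mlir::RewritePatternSet &patterns) {
  mlir::FailureOr<mlir::amdgpu::Chipset> chipset =
      mlir::amdgpu::Chipset::parse("gfx942");
  mlir::populateMathToROCDLConversionPatterns(
      converter, patterns,
      succeeded(chipset) ? std::optional(*chipset) : std::nullopt);
}
```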
diff --git a/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp b/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp
index c798adb..61166db 100644
--- a/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp
+++ b/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp
@@ -339,6 +339,25 @@ void RawBufferAtomicCmpswapOp::getCanonicalizationPatterns(
}
//===----------------------------------------------------------------------===//
+// ScaledExtPacked816Op
+//===----------------------------------------------------------------------===//
+LogicalResult ScaledExtPacked816Op::verify() {
+ int blockSize = getBlockSize();
+ assert((blockSize == 16 || blockSize == 32) && "invalid block size");
+ int firstScaleByte = getFirstScaleByte();
+ if (blockSize == 16 && !llvm::is_contained({0, 1}, firstScaleByte)) {
+ return emitOpError(
+ "blockSize of 16 can only have firstScaleByte be 0 or 1.");
+ }
+ if (blockSize == 32 && !llvm::is_contained({0, 2}, firstScaleByte)) {
+ return emitOpError(
+ "blockSize of 32 can only have firstScaleByte be 0 or 2.");
+ }
+
+ return success();
+}
+
+//===----------------------------------------------------------------------===//
// WMMAOp
//===----------------------------------------------------------------------===//
LogicalResult WMMAOp::verify() {
diff --git a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
index 749e2ba..e0a53cd 100644
--- a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
+++ b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
@@ -2600,6 +2600,65 @@ static LogicalResult foldLoopBounds(AffineForOp forOp) {
return success(folded);
}
+/// Returns constant trip count in trivial cases.
+static std::optional<uint64_t> getTrivialConstantTripCount(AffineForOp forOp) {
+ int64_t step = forOp.getStepAsInt();
+ if (!forOp.hasConstantBounds() || step <= 0)
+ return std::nullopt;
+ int64_t lb = forOp.getConstantLowerBound();
+ int64_t ub = forOp.getConstantUpperBound();
+ return ub - lb <= 0 ? 0 : (ub - lb + step - 1) / step;
+}
+
+/// Fold the empty loop.
+static SmallVector<OpFoldResult> AffineForEmptyLoopFolder(AffineForOp forOp) {
+ if (!llvm::hasSingleElement(*forOp.getBody()))
+ return {};
+ if (forOp.getNumResults() == 0)
+ return {};
+ std::optional<uint64_t> tripCount = getTrivialConstantTripCount(forOp);
+ if (tripCount == 0) {
+ // The initial values of the iteration arguments would be the op's
+ // results.
+ return forOp.getInits();
+ }
+ SmallVector<Value, 4> replacements;
+ auto yieldOp = cast<AffineYieldOp>(forOp.getBody()->getTerminator());
+ auto iterArgs = forOp.getRegionIterArgs();
+ bool hasValDefinedOutsideLoop = false;
+ bool iterArgsNotInOrder = false;
+ for (unsigned i = 0, e = yieldOp->getNumOperands(); i < e; ++i) {
+ Value val = yieldOp.getOperand(i);
+ BlockArgument *iterArgIt = llvm::find(iterArgs, val);
+ // TODO: It should be possible to perform a replacement by computing the
+ // last value of the IV based on the bounds and the step.
+ if (val == forOp.getInductionVar())
+ return {};
+ if (iterArgIt == iterArgs.end()) {
+ // `val` is defined outside of the loop.
+ assert(forOp.isDefinedOutsideOfLoop(val) &&
+ "must be defined outside of the loop");
+ hasValDefinedOutsideLoop = true;
+ replacements.push_back(val);
+ } else {
+ unsigned pos = std::distance(iterArgs.begin(), iterArgIt);
+ if (pos != i)
+ iterArgsNotInOrder = true;
+ replacements.push_back(forOp.getInits()[pos]);
+ }
+ }
+ // Bail out when the trip count is unknown and the loop returns any value
+ // defined outside of the loop or any iterArg out of order.
+ if (!tripCount.has_value() &&
+ (hasValDefinedOutsideLoop || iterArgsNotInOrder))
+ return {};
+ // Bail out when the loop iterates more than once and it returns any iterArg
+ // out of order.
+ if (tripCount.has_value() && tripCount.value() >= 2 && iterArgsNotInOrder)
+ return {};
+ return llvm::to_vector_of<OpFoldResult>(replacements);
+}
+
/// Canonicalize the bounds of the given loop.
static LogicalResult canonicalizeLoopBounds(AffineForOp forOp) {
SmallVector<Value, 4> lbOperands(forOp.getLowerBoundOperands());
@@ -2631,79 +2690,30 @@ static LogicalResult canonicalizeLoopBounds(AffineForOp forOp) {
return success();
}
-namespace {
-/// Returns constant trip count in trivial cases.
-static std::optional<uint64_t> getTrivialConstantTripCount(AffineForOp forOp) {
- int64_t step = forOp.getStepAsInt();
- if (!forOp.hasConstantBounds() || step <= 0)
- return std::nullopt;
- int64_t lb = forOp.getConstantLowerBound();
- int64_t ub = forOp.getConstantUpperBound();
- return ub - lb <= 0 ? 0 : (ub - lb + step - 1) / step;
+/// Returns true if the affine.for has zero iterations in trivial cases.
+static bool hasTrivialZeroTripCount(AffineForOp op) {
+ return getTrivialConstantTripCount(op) == 0;
}
-/// This is a pattern to fold trivially empty loop bodies.
-/// TODO: This should be moved into the folding hook.
-struct AffineForEmptyLoopFolder : public OpRewritePattern<AffineForOp> {
- using OpRewritePattern<AffineForOp>::OpRewritePattern;
-
- LogicalResult matchAndRewrite(AffineForOp forOp,
- PatternRewriter &rewriter) const override {
- // Check that the body only contains a yield.
- if (!llvm::hasSingleElement(*forOp.getBody()))
- return failure();
- if (forOp.getNumResults() == 0)
- return success();
- std::optional<uint64_t> tripCount = getTrivialConstantTripCount(forOp);
- if (tripCount == 0) {
- // The initial values of the iteration arguments would be the op's
- // results.
- rewriter.replaceOp(forOp, forOp.getInits());
- return success();
- }
- SmallVector<Value, 4> replacements;
- auto yieldOp = cast<AffineYieldOp>(forOp.getBody()->getTerminator());
- auto iterArgs = forOp.getRegionIterArgs();
- bool hasValDefinedOutsideLoop = false;
- bool iterArgsNotInOrder = false;
- for (unsigned i = 0, e = yieldOp->getNumOperands(); i < e; ++i) {
- Value val = yieldOp.getOperand(i);
- auto *iterArgIt = llvm::find(iterArgs, val);
- // TODO: It should be possible to perform a replacement by computing the
- // last value of the IV based on the bounds and the step.
- if (val == forOp.getInductionVar())
- return failure();
- if (iterArgIt == iterArgs.end()) {
- // `val` is defined outside of the loop.
- assert(forOp.isDefinedOutsideOfLoop(val) &&
- "must be defined outside of the loop");
- hasValDefinedOutsideLoop = true;
- replacements.push_back(val);
- } else {
- unsigned pos = std::distance(iterArgs.begin(), iterArgIt);
- if (pos != i)
- iterArgsNotInOrder = true;
- replacements.push_back(forOp.getInits()[pos]);
- }
- }
- // Bail out when the trip count is unknown and the loop returns any value
- // defined outside of the loop or any iterArg out of order.
- if (!tripCount.has_value() &&
- (hasValDefinedOutsideLoop || iterArgsNotInOrder))
- return failure();
- // Bail out when the loop iterates more than once and it returns any iterArg
- // out of order.
- if (tripCount.has_value() && tripCount.value() >= 2 && iterArgsNotInOrder)
- return failure();
- rewriter.replaceOp(forOp, replacements);
- return success();
+LogicalResult AffineForOp::fold(FoldAdaptor adaptor,
+ SmallVectorImpl<OpFoldResult> &results) {
+ bool folded = succeeded(foldLoopBounds(*this));
+ folded |= succeeded(canonicalizeLoopBounds(*this));
+ if (hasTrivialZeroTripCount(*this) && getNumResults() != 0) {
+ // The initial values of the loop-carried variables (iter_args) are the
+ // results of the op. But this must be avoided for an affine.for op that
+ // does not return any results. Since ops that do not return results cannot
+ // be folded away, we would enter an infinite loop of folds on the same
+ // affine.for op.
+ results.assign(getInits().begin(), getInits().end());
+ folded = true;
}
-};
-} // namespace
-
-void AffineForOp::getCanonicalizationPatterns(RewritePatternSet &results,
- MLIRContext *context) {
- results.add<AffineForEmptyLoopFolder>(context);
+ SmallVector<OpFoldResult> foldResults = AffineForEmptyLoopFolder(*this);
+ if (!foldResults.empty()) {
+ results.assign(foldResults);
+ folded = true;
+ }
+ return success(folded);
}
OperandRange AffineForOp::getEntrySuccessorOperands(RegionBranchPoint point) {
@@ -2746,27 +2756,6 @@ void AffineForOp::getSuccessorRegions(
regions.push_back(RegionSuccessor(getResults()));
}
-/// Returns true if the affine.for has zero iterations in trivial cases.
-static bool hasTrivialZeroTripCount(AffineForOp op) {
- return getTrivialConstantTripCount(op) == 0;
-}
-
-LogicalResult AffineForOp::fold(FoldAdaptor adaptor,
- SmallVectorImpl<OpFoldResult> &results) {
- bool folded = succeeded(foldLoopBounds(*this));
- folded |= succeeded(canonicalizeLoopBounds(*this));
- if (hasTrivialZeroTripCount(*this) && getNumResults() != 0) {
- // The initial values of the loop-carried variables (iter_args) are the
- // results of the op. But this must be avoided for an affine.for op that
- // does not return any results. Since ops that do not return results cannot
- // be folded away, we would enter an infinite loop of folds on the same
- // affine.for op.
- results.assign(getInits().begin(), getInits().end());
- folded = true;
- }
- return success(folded);
-}
-
AffineBound AffineForOp::getLowerBound() {
return AffineBound(*this, getLowerBoundOperands(), getLowerBoundMap());
}
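For reference, a worked instance of the trivial trip-count formula used by the new fold; the concrete bounds are illustrative, not taken from the patch:

```c++
// getTrivialConstantTripCount computes ceil((ub - lb) / step) for positive steps:
//   lb = 0, ub = 10, step = 3
//   (ub - lb + step - 1) / step = (10 - 0 + 3 - 1) / 3 = 4
// so the loop body runs for iv = 0, 3, 6, and 9.
```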
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/DropEquivalentBufferResults.cpp b/mlir/lib/Dialect/Bufferization/Transforms/DropEquivalentBufferResults.cpp
index 70faa71..bc17990 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/DropEquivalentBufferResults.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/DropEquivalentBufferResults.cpp
@@ -41,18 +41,37 @@ namespace bufferization {
using namespace mlir;
-/// Return the unique ReturnOp that terminates `funcOp`.
-/// Return nullptr if there is no such unique ReturnOp.
-static func::ReturnOp getAssumedUniqueReturnOp(func::FuncOp funcOp) {
- func::ReturnOp returnOp;
+/// Get all the ReturnOp in the funcOp.
+static SmallVector<func::ReturnOp> getReturnOps(func::FuncOp funcOp) {
+ SmallVector<func::ReturnOp> returnOps;
for (Block &b : funcOp.getBody()) {
if (auto candidateOp = dyn_cast<func::ReturnOp>(b.getTerminator())) {
- if (returnOp)
- return nullptr;
- returnOp = candidateOp;
+ returnOps.push_back(candidateOp);
}
}
- return returnOp;
+ return returnOps;
+}
+
+/// Get the operands at the specified position for all returnOps.
+static SmallVector<Value>
+getReturnOpsOperandInPos(ArrayRef<func::ReturnOp> returnOps, size_t pos) {
+ return llvm::map_to_vector(returnOps, [&](func::ReturnOp returnOp) {
+ return returnOp.getOperand(pos);
+ });
+}
+
+/// Check if all given values are the same buffer as the block argument (modulo
+/// cast ops).
+static bool operandsEqualFuncArgument(ArrayRef<Value> operands,
+ BlockArgument argument) {
+ for (Value val : operands) {
+ while (auto castOp = val.getDefiningOp<memref::CastOp>())
+ val = castOp.getSource();
+
+ if (val != argument)
+ return false;
+ }
+ return true;
}
LogicalResult
@@ -72,40 +91,45 @@ mlir::bufferization::dropEquivalentBufferResults(ModuleOp module) {
for (auto funcOp : module.getOps<func::FuncOp>()) {
if (funcOp.isExternal() || funcOp.isPublic())
continue;
- func::ReturnOp returnOp = getAssumedUniqueReturnOp(funcOp);
- // TODO: Support functions with multiple blocks.
- if (!returnOp)
+ SmallVector<func::ReturnOp> returnOps = getReturnOps(funcOp);
+ if (returnOps.empty())
continue;
// Compute erased results.
- SmallVector<Value> newReturnValues;
- BitVector erasedResultIndices(funcOp.getFunctionType().getNumResults());
+ size_t numReturnOps = returnOps.size();
+ size_t numReturnValues = funcOp.getFunctionType().getNumResults();
+ SmallVector<SmallVector<Value>> newReturnValues(numReturnOps);
+ BitVector erasedResultIndices(numReturnValues);
DenseMap<int64_t, int64_t> resultToArgs;
- for (const auto &it : llvm::enumerate(returnOp.getOperands())) {
+ for (size_t i = 0; i < numReturnValues; ++i) {
bool erased = false;
+ SmallVector<Value> returnOperands =
+ getReturnOpsOperandInPos(returnOps, i);
for (BlockArgument bbArg : funcOp.getArguments()) {
- Value val = it.value();
- while (auto castOp = val.getDefiningOp<memref::CastOp>())
- val = castOp.getSource();
-
- if (val == bbArg) {
- resultToArgs[it.index()] = bbArg.getArgNumber();
+ if (operandsEqualFuncArgument(returnOperands, bbArg)) {
+ resultToArgs[i] = bbArg.getArgNumber();
erased = true;
break;
}
}
if (erased) {
- erasedResultIndices.set(it.index());
+ erasedResultIndices.set(i);
} else {
- newReturnValues.push_back(it.value());
+ for (auto [newReturnValue, operand] :
+ llvm::zip(newReturnValues, returnOperands)) {
+ newReturnValue.push_back(operand);
+ }
}
}
// Update function.
if (failed(funcOp.eraseResults(erasedResultIndices)))
return failure();
- returnOp.getOperandsMutable().assign(newReturnValues);
+
+ for (auto [returnOp, newReturnValue] :
+ llvm::zip(returnOps, newReturnValues))
+ returnOp.getOperandsMutable().assign(newReturnValue);
// Update function calls.
for (func::CallOp callOp : callerMap[funcOp]) {
diff --git a/mlir/lib/Dialect/GPU/Pipelines/CMakeLists.txt b/mlir/lib/Dialect/GPU/Pipelines/CMakeLists.txt
index 70a9c77..ec68acf 100644
--- a/mlir/lib/Dialect/GPU/Pipelines/CMakeLists.txt
+++ b/mlir/lib/Dialect/GPU/Pipelines/CMakeLists.txt
@@ -1,5 +1,6 @@
add_mlir_dialect_library(MLIRGPUPipelines
GPUToNVVMPipeline.cpp
+ GPUToXeVMPipeline.cpp
ADDITIONAL_HEADER_DIRS
${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/GPU
@@ -11,12 +12,17 @@ add_mlir_dialect_library(MLIRGPUPipelines
MLIRTransforms
MLIRLinalgTransforms
MLIRAffineToStandard
+ MLIRGPUToLLVMSPV
MLIRGPUToNVVMTransforms
MLIRIndexToLLVM
MLIRMathToLLVM
+ MLIRMathToXeVM
MLIRNVGPUToNVVM
MLIRNVVMToLLVM
MLIRReconcileUnrealizedCasts
MLIRSCFToControlFlow
MLIRVectorToSCF
+ MLIRXeGPUTransforms
+ MLIRXeGPUToXeVM
+ MLIRXeVMToLLVM
)
diff --git a/mlir/lib/Dialect/GPU/Pipelines/GPUToXeVMPipeline.cpp b/mlir/lib/Dialect/GPU/Pipelines/GPUToXeVMPipeline.cpp
new file mode 100644
index 0000000..1a1485b
--- /dev/null
+++ b/mlir/lib/Dialect/GPU/Pipelines/GPUToXeVMPipeline.cpp
@@ -0,0 +1,139 @@
+//===- GPUToXeVMPipeline.cpp - Lowering pipeline to XeVM/LLVM -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a generally usable sink pipeline that lowers GPU code
+// to XeVM. If XeGPU ops are used, the MLIR input is expected to already have
+// them embedded inside the gpu module code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Conversion/AffineToStandard/AffineToStandard.h"
+#include "mlir/Conversion/MathToXeVM/MathToXeVM.h"
+#include "mlir/Conversion/Passes.h"
+#include "mlir/Conversion/SCFToControlFlow/SCFToControlFlow.h"
+#include "mlir/Conversion/VectorToSCF/VectorToSCF.h"
+#include "mlir/Conversion/XeGPUToXeVM/XeGPUToXeVM.h"
+#include "mlir/Conversion/XeVMToLLVM/XeVMToLLVM.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/Dialect/GPU/IR/GPUDialect.h"
+#include "mlir/Dialect/GPU/Pipelines/Passes.h"
+#include "mlir/Dialect/GPU/Transforms/Passes.h"
+#include "mlir/Dialect/LLVMIR/Transforms/RequestCWrappers.h"
+#include "mlir/Dialect/MemRef/Transforms/Passes.h"
+#include "mlir/Dialect/XeGPU/Transforms/Passes.h"
+#include "mlir/Pass/PassManager.h"
+#include "mlir/Pass/PassOptions.h"
+#include "mlir/Target/LLVM/XeVM/Target.h"
+#include "mlir/Transforms/Passes.h"
+
+using namespace mlir;
+
+namespace {
+//===----------------------------------------------------------------------===//
+// Pre-GPU common pipeline for both Host and GPU.
+//===----------------------------------------------------------------------===//
+void buildPreGPUCommonPassPipeline(
+ OpPassManager &pm, const mlir::gpu::GPUToXeVMPipelineOptions &options) {
+ // builtin.module scope passes.
+ pm.addPass(createCSEPass());
+ pm.addPass(createConvertVectorToSCFPass());
+ {
+ GpuXeVMAttachTargetOptions xevmTargetOptions;
+ xevmTargetOptions.moduleMatcher = options.xevmModuleMatcher;
+ xevmTargetOptions.triple = options.zebinTriple;
+ xevmTargetOptions.chip = options.zebinChip;
+ xevmTargetOptions.optLevel = options.optLevel;
+ xevmTargetOptions.cmdOptions = options.cmdOptions;
+ pm.addPass(createGpuXeVMAttachTarget(xevmTargetOptions));
+ }
+ pm.addPass(createLowerAffinePass());
+ pm.addNestedPass<func::FuncOp>(createGpuAsyncRegionPass());
+}
+
+//===----------------------------------------------------------------------===//
+// GPUModule-specific stuff.
+//===----------------------------------------------------------------------===//
+void buildGPUPassPipeline(OpPassManager &pm,
+ const mlir::gpu::GPUToXeVMPipelineOptions &options) {
+ if (options.xegpuOpLevel == "workgroup") {
+ pm.addNestedPass<gpu::GPUModuleOp>(xegpu::createXeGPUWgToSgDistribute());
+ pm.addNestedPass<gpu::GPUModuleOp>(createCSEPass());
+ pm.addNestedPass<gpu::GPUModuleOp>(xegpu::createXeGPUBlocking());
+ pm.addNestedPass<gpu::GPUModuleOp>(createCanonicalizerPass());
+ pm.addNestedPass<gpu::GPUModuleOp>(createCSEPass());
+ }
+ if (options.xegpuOpLevel == "subgroup" ||
+ options.xegpuOpLevel == "workgroup") {
+ pm.addNestedPass<gpu::GPUModuleOp>(xegpu::createXeGPUPropagateLayout());
+ pm.addNestedPass<gpu::GPUModuleOp>(xegpu::createXeGPUSubgroupDistribute());
+ pm.addNestedPass<gpu::GPUModuleOp>(createCanonicalizerPass());
+ pm.addNestedPass<gpu::GPUModuleOp>(createCSEPass());
+ pm.addNestedPass<gpu::GPUModuleOp>(createLoopInvariantCodeMotionPass());
+ pm.addNestedPass<gpu::GPUModuleOp>(createCSEPass());
+ pm.addNestedPass<gpu::GPUModuleOp>(xegpu::createXeGPUVectorLinearize());
+ }
+ pm.addNestedPass<gpu::GPUModuleOp>(createConvertMathToXeVM());
+ pm.addNestedPass<gpu::GPUModuleOp>(createConvertXeGPUToXeVMPass());
+ {
+ ConvertGpuOpsToLLVMSPVOpsOptions gpuToLLVMSPVOptions;
+ gpuToLLVMSPVOptions.use64bitIndex = options.use64bitIndex;
+ pm.addNestedPass<gpu::GPUModuleOp>(
+ createConvertGpuOpsToLLVMSPVOps(gpuToLLVMSPVOptions));
+ }
+ pm.addNestedPass<gpu::GPUModuleOp>(createCSEPass());
+ pm.addNestedPass<gpu::GPUModuleOp>(createReconcileUnrealizedCastsPass());
+}
+
+//===----------------------------------------------------------------------===//
+// Post-GPU pipeline for both Host and GPU.
+//===----------------------------------------------------------------------===//
+void buildPostGPUCommonPassPipeline(
+ OpPassManager &pm, const mlir::gpu::GPUToXeVMPipelineOptions &options) {
+ // builtin.module scope passes.
+ pm.addPass(createSCFToControlFlowPass());
+ pm.addPass(memref::createExpandStridedMetadataPass());
+ {
+ GpuToLLVMConversionPassOptions gpuToLLVMOptions;
+ gpuToLLVMOptions.hostBarePtrCallConv = options.hostBarePtrCallConv;
+ gpuToLLVMOptions.kernelBarePtrCallConv = options.kernelBarePtrCallConv;
+ pm.addPass(createGpuToLLVMConversionPass(gpuToLLVMOptions));
+ }
+ pm.addPass(createLowerAffinePass());
+ pm.addPass(createConvertToLLVMPass());
+ pm.addPass(createReconcileUnrealizedCastsPass());
+ // gpu-module-to-binary
+ {
+ GpuModuleToBinaryPassOptions gpuToModuleBinOptions;
+ gpuToModuleBinOptions.compilationTarget = options.binaryFormat;
+ gpuToModuleBinOptions.cmdOptions = options.cmdOptions;
+ pm.addPass(createGpuModuleToBinaryPass(gpuToModuleBinOptions));
+ }
+}
+} // namespace
+
+void mlir::gpu::buildLowerToXeVMPassPipeline(
+ OpPassManager &pm, const GPUToXeVMPipelineOptions &options) {
+ // Pre-GPU common pipelines.
+ buildPreGPUCommonPassPipeline(pm, options);
+
+ // GPUModule-specific stuff.
+ buildGPUPassPipeline(pm, options);
+
+ // Post-GPU pipeline for both Host and GPU.
+ buildPostGPUCommonPassPipeline(pm, options);
+}
+
+void mlir::gpu::registerGPUToXeVMPipeline() {
+ PassPipelineRegistration<GPUToXeVMPipelineOptions>(
+ "gpu-lower-to-xevm-pipeline",
+ "The default GPU to XeVM lowering pipeline. It starts by lowering GPU "
+ "code to the "
+      "specified compilation target (default is fatbin), then lowers the host "
+ "code.",
+ buildLowerToXeVMPassPipeline);
+}
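A hedged sketch of driving the pipeline defined above programmatically; the helper name, the chosen "subgroup" level, and the PassManager wiring are assumptions for illustration:

```c++
#include "mlir/Dialect/GPU/Pipelines/Passes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"

// Builds and runs the GPU-to-XeVM lowering on a module containing XeGPU ops.
static mlir::LogicalResult lowerModuleToXeVM(mlir::ModuleOp module) {
  mlir::PassManager pm(module.getContext());
  mlir::gpu::GPUToXeVMPipelineOptions options;
  options.xegpuOpLevel = "subgroup"; // or "workgroup"
  mlir::gpu::buildLowerToXeVMPassPipeline(pm, options);
  return pm.run(module);
}
```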
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
index 6192d79..9a8a63e 100644
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -2457,26 +2457,24 @@ transform::PadTilingInterfaceOp::apply(transform::TransformRewriter &rewriter,
}
// Set options.
- TilingInterface paddedOp;
PadTilingInterfaceOptions options;
options.setPaddingValues(paddingValues)
.setPaddingSizes(getMixedPaddingSizes())
.setPadToMultipleOf(getPadToMultipleOf());
- // Apply padding.
- SmallVector<tensor::PadOp> newPadOps;
- FailureOr<TilingInterface> maybePaddedOp = rewriteAsPaddedOp(
- rewriter, cast<TilingInterface>(targetOp.getOperation()), options,
- newPadOps);
- if (failed(maybePaddedOp)) {
+ auto maybePadOps = rewriteAsPaddedOp(
+ rewriter, cast<TilingInterface>(targetOp.getOperation()), options);
+ if (failed(maybePadOps)) {
auto diag = emitSilenceableError() << "failed to pad op";
diag.attachNote(target->getLoc()) << "target op";
return diag;
}
+ const auto &[paddedOperands, paddedOp, slicedResults] = maybePadOps.value();
// Set transform results.
- paddedOps.push_back(cast<TilingInterface>(maybePaddedOp->getOperation()));
- padOps.append(newPadOps.begin(), newPadOps.end());
+ paddedOps.push_back(paddedOp);
+ padOps.append(paddedOperands.begin(), paddedOperands.end());
+ rewriter.replaceOp(targetOp.getOperation(), slicedResults);
}
results.set(cast<OpResult>(getPadded()), paddedOps);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp b/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp
index 0956c5d..3e787a2 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp
@@ -95,10 +95,11 @@ static int64_t extractConstantMultiplier(AffineExpr expr) {
/// - affine_map<(d0, d1) -> (d0 * 3 + d1)>
/// In the future, more general interfaces can be devised to encode similar
/// shape evolutions and map between an op and its operands.
-SmallVector<OpFoldResult> linalg::computePaddedShape(
- RewriterBase &rewriter, TypedValue<RankedTensorType> v,
- AffineMap indexingMap, ArrayRef<OpFoldResult> indexingSizes,
- const PadTilingInterfaceOptions &options) {
+SmallVector<OpFoldResult>
+linalg::computePaddedShape(OpBuilder &builder, TypedValue<RankedTensorType> v,
+ AffineMap indexingMap,
+ ArrayRef<OpFoldResult> indexingSizes,
+ const PadTilingInterfaceOptions &options) {
Location loc = v.getLoc();
SmallVector<OpFoldResult> paddedShape;
auto tensorType = cast<RankedTensorType>(v.getType());
@@ -109,7 +110,7 @@ SmallVector<OpFoldResult> linalg::computePaddedShape(
// "Full-rank" padding specification.
SmallVector<OpFoldResult> paddingSizes =
- getFullRankPaddingSizes(rewriter, indexingSizes, options);
+ getFullRankPaddingSizes(builder, indexingSizes, options);
// For each dimension in the operand's shape, iterate over indexingSizes and
// add the various term contributions.
@@ -147,28 +148,27 @@ SmallVector<OpFoldResult> linalg::computePaddedShape(
OpFoldResult paddingDimOfr;
if (options.padToMultipleOf) {
AffineExpr d0, s0;
- bindDims(rewriter.getContext(), d0);
- bindSymbols(rewriter.getContext(), s0);
+ bindDims(builder.getContext(), d0);
+ bindSymbols(builder.getContext(), s0);
AffineMap ceilMap = AffineMap::get(1, 1, d0.ceilDiv(s0) * s0);
AffineMap composedMap = projectedMap.compose(ceilMap);
paddingDimOfr = affine::makeComposedFoldedAffineApply(
- rewriter, loc, composedMap,
- {indexingSizes[paddingDim], paddingSize},
+ builder, loc, composedMap, {indexingSizes[paddingDim], paddingSize},
/*composeAffineMin=*/true);
} else {
// Otherwise just set to paddingSize.
paddingDimOfr = affine::makeComposedFoldedAffineApply(
- rewriter, loc, projectedMap, paddingSize);
+ builder, loc, projectedMap, paddingSize);
}
// Adjust for the maximum accessed index, which is (paddingSize - 1) *
// multiplier.
AffineExpr d0;
- bindDims(rewriter.getContext(), d0);
+ bindDims(builder.getContext(), d0);
int64_t multiplier = extractConstantMultiplier(projectedMap.getResult(0));
AffineMap subtractMap = AffineMap::get(1, 0, d0 - multiplier);
OpFoldResult maxAccessIdx = affine::makeComposedFoldedAffineApply(
- rewriter, loc, subtractMap, {paddingDimOfr});
+ builder, loc, subtractMap, {paddingDimOfr});
terms.push_back(maxAccessIdx);
LLVM_DEBUG(DBGS() << "------new term: " << terms.back() << "\n");
@@ -177,19 +177,19 @@ SmallVector<OpFoldResult> linalg::computePaddedShape(
// If there are no terms, just return the dim.
if (terms.empty()) {
paddedShape[resultIndex] =
- createFoldedDimOp(rewriter, loc, v, resultIndex);
+ createFoldedDimOp(builder, loc, v, resultIndex);
continue;
}
// Sum individual terms' contributions.
SmallVector<AffineExpr> dims(terms.size());
- bindDimsList(rewriter.getContext(), MutableArrayRef{dims});
+ bindDimsList(builder.getContext(), MutableArrayRef{dims});
AffineExpr sumExpr = dims.front();
for (unsigned i = 1; i < dims.size(); ++i)
sumExpr = sumExpr + dims[i];
// Add 1 to the maximum accessed index and get the final padded size.
- OpFoldResult paddedDimOfr = affine::makeComposedFoldedAffineApply(
- rewriter, loc, sumExpr + 1, terms);
+ OpFoldResult paddedDimOfr =
+ affine::makeComposedFoldedAffineApply(builder, loc, sumExpr + 1, terms);
paddedShape[resultIndex] = paddedDimOfr;
}
@@ -198,7 +198,7 @@ SmallVector<OpFoldResult> linalg::computePaddedShape(
FailureOr<SmallVector<OpFoldResult>>
linalg::computeIndexingMapOpInterfacePaddedShape(
- RewriterBase &rewriter, OpOperand &operandToPad,
+ OpBuilder &builder, OpOperand &operandToPad,
ArrayRef<Range> iterationDomain, const PadTilingInterfaceOptions &options) {
auto transferOp =
llvm::dyn_cast<IndexingMapOpInterface>(operandToPad.getOwner());
@@ -206,9 +206,9 @@ linalg::computeIndexingMapOpInterfacePaddedShape(
return failure();
// clang-format off
- assert(llvm::all_of(iterationDomain, [&rewriter](Range r) {
- return r.offset == OpFoldResult(rewriter.getIndexAttr(0)) &&
- r.stride == OpFoldResult(rewriter.getIndexAttr(1));
+ assert(llvm::all_of(iterationDomain, [&builder](Range r) {
+ return r.offset == OpFoldResult(builder.getIndexAttr(0)) &&
+ r.stride == OpFoldResult(builder.getIndexAttr(1));
}) && "expected 0-offset 1-stride loop ranges");
// clang-format on
SmallVector<OpFoldResult> loopUpperBounds;
@@ -218,13 +218,13 @@ linalg::computeIndexingMapOpInterfacePaddedShape(
AffineMap indexingMap = transferOp.getMatchingIndexingMap(&operandToPad);
return computePaddedShape(
- rewriter, cast<TypedValue<RankedTensorType>>(operandToPad.get()),
+ builder, cast<TypedValue<RankedTensorType>>(operandToPad.get()),
indexingMap, loopUpperBounds, options);
}
/// Pad a single operand to `paddedShape` using `paddingValueAttr` as padding
/// Value.
-static Value padOperand(RewriterBase &rewriter, TilingInterface opToPad,
+static Value padOperand(OpBuilder &builder, TilingInterface opToPad,
TypedValue<RankedTensorType> v,
ArrayRef<OpFoldResult> paddedShape,
Attribute paddingValueAttr) {
@@ -232,15 +232,15 @@ static Value padOperand(RewriterBase &rewriter, TilingInterface opToPad,
if (auto complexTy =
dyn_cast<ComplexType>(getElementTypeOrSelf(v.getType()))) {
if (auto complexAttr = dyn_cast<ArrayAttr>(paddingValueAttr)) {
- paddingValue = complex::ConstantOp::create(rewriter, opToPad.getLoc(),
+ paddingValue = complex::ConstantOp::create(builder, opToPad.getLoc(),
complexTy, complexAttr);
}
} else if (isa<ub::PoisonAttr>(paddingValueAttr)) {
- paddingValue = ub::PoisonOp::create(rewriter, opToPad.getLoc(),
+ paddingValue = ub::PoisonOp::create(builder, opToPad.getLoc(),
getElementTypeOrSelf(v.getType()));
} else if (auto typedAttr = dyn_cast<TypedAttr>(paddingValueAttr)) {
paddingValue =
- arith::ConstantOp::create(rewriter, opToPad.getLoc(), typedAttr);
+ arith::ConstantOp::create(builder, opToPad.getLoc(), typedAttr);
}
assert(paddingValue && "failed to create value from padding attribute");
@@ -259,49 +259,48 @@ static Value padOperand(RewriterBase &rewriter, TilingInterface opToPad,
RankedTensorType::get(tensorShape, getElementTypeOrSelf(v));
LLVM_DEBUG(DBGS() << "--SUCCESS, makeComposedPadHighOp with type: "
<< paddedTensorType);
- return makeComposedPadHighOp(rewriter, opToPad.getLoc(), paddedTensorType, v,
+ return makeComposedPadHighOp(builder, opToPad.getLoc(), paddedTensorType, v,
paddingValue, /*nofold=*/false, dynDims);
}
-FailureOr<TilingInterface> linalg::rewriteAsPaddedOp(
- RewriterBase &rewriter, TilingInterface opToPad,
- const PadTilingInterfaceOptions &constOptions,
- SmallVector<tensor::PadOp> &padOps,
+FailureOr<PadTilingInterfaceResult> linalg::rewriteAsPaddedOp(
+ OpBuilder &builder, TilingInterface toPad,
+ PadTilingInterfaceOptions options,
const PadSizeComputationFunction &computePaddingSizeFun) {
- LLVM_DEBUG(DBGS() << "Start rewriteAsPaddedOp : " << opToPad << "\n");
+ LLVM_DEBUG(DBGS() << "Start rewriteAsPaddedOp : " << toPad << "\n");
+ SmallVector<tensor::PadOp> padOps;
+ Location loc = toPad.getLoc();
- Location loc = opToPad.getLoc();
- PadTilingInterfaceOptions options(constOptions);
// Allow inference of pad values if they are not explicitly specified.
// TODO: be mindful about the value depending on the actual operation.
if (options.paddingValues.empty()) {
- SmallVector<Type> types(opToPad->getOperandTypes());
- llvm::append_range(types, opToPad->getResultTypes());
+ SmallVector<Type> types(toPad->getOperandTypes());
+ llvm::append_range(types, toPad->getResultTypes());
for (Type t : types) {
options.paddingValues.push_back(
- rewriter.getZeroAttr(getElementTypeOrSelf(t)));
+ builder.getZeroAttr(getElementTypeOrSelf(t)));
}
}
- if (llvm::any_of(opToPad->getOperands(),
+ if (llvm::any_of(toPad->getOperands(),
[](Value v) { return isa<MemRefType>(v.getType()); })) {
- return rewriter.notifyMatchFailure(opToPad,
- "expected operation on tensors");
+ LLVM_DEBUG(DBGS() << "Not an operation on tensors: FAIL\n");
+ return failure();
}
- OpBuilder::InsertionGuard g(rewriter);
- // Set IP after opToPad because we also take the dims of opToPad's output.
- rewriter.setInsertionPointAfter(opToPad);
+ OpBuilder::InsertionGuard g(builder);
+ // Set IP after toPad because we also take the dims of toPad's output.
+ builder.setInsertionPointAfter(toPad);
// 1. Get the loopUpperBounds from the TilingInterface.
- SmallVector<Range> iterationDomain = opToPad.getIterationDomain(rewriter);
+ SmallVector<Range> iterationDomain = toPad.getIterationDomain(builder);
// 2. For each operand.
SmallVector<Value> newOperands;
- newOperands.reserve(opToPad->getNumOperands());
- for (OpOperand &opOperand : opToPad->getOpOperands()) {
+ newOperands.reserve(toPad->getNumOperands());
+ for (OpOperand &opOperand : toPad->getOpOperands()) {
Value operand = opOperand.get();
- LLVM_DEBUG(DBGS() << "--start padding oprd: " << operand << "\n");
+ LLVM_DEBUG(DBGS() << "--start padding operand: " << operand << "\n");
// 2.a. Skip scalar-like operands.
Type operandType = operand.getType();
@@ -311,30 +310,31 @@ FailureOr<TilingInterface> linalg::rewriteAsPaddedOp(
newOperands.push_back(operand);
continue;
}
+
// 2.a. Compute padded shape.
FailureOr<SmallVector<OpFoldResult>> maybePaddedShape =
- computePaddingSizeFun(rewriter, opOperand, iterationDomain, options);
+ computePaddingSizeFun(builder, opOperand, iterationDomain, options);
if (failed(maybePaddedShape)) {
- return rewriter.notifyMatchFailure(opToPad, "could not pad op");
+ LLVM_DEBUG(DBGS() << "Could not get padded shape of operand: FAIL\n");
+ return failure();
}
// 2.b. Expect proper `paddingValues`.
// TODO: we may want to allow garbage padding in the future, in which case
// we would just not assert.
if (opOperand.getOperandNumber() >= options.paddingValues.size()) {
- return rewriter.notifyMatchFailure(opToPad,
- "--no padding value specified");
+ LLVM_DEBUG(DBGS() << "Too few padding values specified: FAIL\n");
+ return failure();
}
Attribute paddingValueAttr =
options.paddingValues[opOperand.getOperandNumber()];
// 2.c. Perform actual padding.
- Value paddedOperand = padOperand(
- rewriter, opToPad, cast<TypedValue<RankedTensorType>>(operand),
- *maybePaddedShape, paddingValueAttr);
+ Value paddedOperand =
+ padOperand(builder, toPad, cast<TypedValue<RankedTensorType>>(operand),
+ *maybePaddedShape, paddingValueAttr);
LLVM_DEBUG(DBGS() << "--done padding operand: " << paddedOperand << "\n");
- // 2.d. Perform actual padding.
newOperands.push_back(paddedOperand);
if (auto padOp = paddedOperand.getDefiningOp<tensor::PadOp>())
padOps.push_back(padOp);
@@ -342,38 +342,34 @@ FailureOr<TilingInterface> linalg::rewriteAsPaddedOp(
// 3. Form the resulting tensor::ExtractSliceOp.
ReifiedRankedShapedTypeDims reifiedResultShapes;
- if (failed(reifyResultShapes(rewriter, opToPad, reifiedResultShapes))) {
- LLVM_DEBUG(DBGS() << "--failed to reify result shapes -> FAIL\n");
- return rewriter.notifyMatchFailure(opToPad,
- "failed to reify result shapes");
+ if (failed(reifyResultShapes(builder, toPad, reifiedResultShapes))) {
+ LLVM_DEBUG(DBGS() << "Failed to reify result shapes: FAIL\n");
+ return failure();
}
- assert(reifiedResultShapes.size() == opToPad->getNumResults() &&
+ assert(reifiedResultShapes.size() == toPad->getNumResults() &&
"expected same number of results");
- // Clone `opToPad` to operate on the statically padded shapes.
+ // Clone `toPad` to operate on the statically padded shapes.
auto resultTensorTypes =
- ValueRange(newOperands).take_back(opToPad->getNumResults()).getTypes();
- // clone **should** properly notify the rewriter.
+ ValueRange(newOperands).take_back(toPad->getNumResults()).getTypes();
+ // clone **should** properly notify the builder.
TilingInterface paddedOp =
- clone(rewriter, opToPad, resultTensorTypes, newOperands);
+ clone(builder, toPad, resultTensorTypes, newOperands);
LLVM_DEBUG(DBGS() << "--cloned padded op: " << paddedOp << "\n");
- // Recover the slice out of the new static results. This keeps the original
- // opToPad around because it uses the dims of the original results.
+ // Recover the slice out of the new static results.
SmallVector<Value> paddedSubtensorResults;
- paddedSubtensorResults.reserve(opToPad->getNumResults());
+ paddedSubtensorResults.reserve(toPad->getNumResults());
for (const auto &en : llvm::enumerate(paddedOp->getResults())) {
Value paddedResult = en.value();
int64_t resultNumber = en.index();
int64_t rank = cast<RankedTensorType>(paddedResult.getType()).getRank();
- SmallVector<OpFoldResult> offsets(rank, rewriter.getIndexAttr(0));
- SmallVector<OpFoldResult> strides(rank, rewriter.getIndexAttr(1));
+ SmallVector<OpFoldResult> offsets(rank, builder.getIndexAttr(0));
+ SmallVector<OpFoldResult> strides(rank, builder.getIndexAttr(1));
paddedSubtensorResults.push_back(tensor::ExtractSliceOp::create(
- rewriter, loc, paddedResult, offsets, reifiedResultShapes[resultNumber],
+ builder, loc, paddedResult, offsets, reifiedResultShapes[resultNumber],
strides));
}
- rewriter.replaceOp(opToPad, paddedSubtensorResults);
-
- return paddedOp;
+ return PadTilingInterfaceResult{padOps, paddedOp, paddedSubtensorResults};
}
diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
index 507597b..94947b7 100644
--- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
+++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
@@ -2158,11 +2158,45 @@ public:
return success();
}
};
+
+struct ReinterpretCastOpConstantFolder
+ : public OpRewritePattern<ReinterpretCastOp> {
+public:
+ using OpRewritePattern<ReinterpretCastOp>::OpRewritePattern;
+
+ LogicalResult matchAndRewrite(ReinterpretCastOp op,
+ PatternRewriter &rewriter) const override {
+ unsigned srcStaticCount = llvm::count_if(
+ llvm::concat<OpFoldResult>(op.getMixedOffsets(), op.getMixedSizes(),
+ op.getMixedStrides()),
+ [](OpFoldResult ofr) { return isa<Attribute>(ofr); });
+
+ SmallVector<OpFoldResult> offsets = {op.getConstifiedMixedOffset()};
+ SmallVector<OpFoldResult> sizes = op.getConstifiedMixedSizes();
+ SmallVector<OpFoldResult> strides = op.getConstifiedMixedStrides();
+
+ // TODO: Using counting comparison instead of direct comparison because
+ // getMixedValues (and therefore ReinterpretCastOp::getMixed...) returns
+ // IntegerAttrs, while constifyIndexValues (and therefore
+ // ReinterpretCastOp::getConstifiedMixed...) returns IndexAttrs.
+ if (srcStaticCount ==
+ llvm::count_if(llvm::concat<OpFoldResult>(offsets, sizes, strides),
+ [](OpFoldResult ofr) { return isa<Attribute>(ofr); }))
+ return failure();
+
+ auto newReinterpretCast = ReinterpretCastOp::create(
+ rewriter, op->getLoc(), op.getSource(), offsets[0], sizes, strides);
+
+ rewriter.replaceOpWithNewOp<CastOp>(op, op.getType(), newReinterpretCast);
+ return success();
+ }
+};
} // namespace
void ReinterpretCastOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
- results.add<ReinterpretCastOpExtractStridedMetadataFolder>(context);
+ results.add<ReinterpretCastOpExtractStridedMetadataFolder,
+ ReinterpretCastOpConstantFolder>(context);
}
FailureOr<std::optional<SmallVector<Value>>>
diff --git a/mlir/lib/Dialect/MemRef/Transforms/EmulateWideInt.cpp b/mlir/lib/Dialect/MemRef/Transforms/EmulateWideInt.cpp
index 49b7162..6f815ae 100644
--- a/mlir/lib/Dialect/MemRef/Transforms/EmulateWideInt.cpp
+++ b/mlir/lib/Dialect/MemRef/Transforms/EmulateWideInt.cpp
@@ -121,7 +121,7 @@ struct EmulateWideIntPass final
[&typeConverter](Operation *op) { return typeConverter.isLegal(op); });
RewritePatternSet patterns(ctx);
- // Add common pattenrs to support contants, functions, etc.
+  // Add common patterns to support constants, functions, etc.
arith::populateArithWideIntEmulationPatterns(typeConverter, patterns);
memref::populateMemRefWideIntEmulationPatterns(typeConverter, patterns);
diff --git a/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp b/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp
index 90cbbd8..dcfe2c7 100644
--- a/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp
+++ b/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp
@@ -1030,12 +1030,12 @@ struct RemoveConstantIfConditionWithRegion : public OpRewritePattern<OpTy> {
//===----------------------------------------------------------------------===//
/// Create and populate an init region for privatization recipes.
-/// Returns the init block on success, or nullptr on failure.
+/// Returns success if the region is populated, failure otherwise.
/// Sets needsFree to indicate if the allocated memory requires deallocation.
-static std::unique_ptr<Block> createInitRegion(OpBuilder &builder, Location loc,
- Type varType, StringRef varName,
- ValueRange bounds,
- bool &needsFree) {
+static LogicalResult createInitRegion(OpBuilder &builder, Location loc,
+ Region &initRegion, Type varType,
+ StringRef varName, ValueRange bounds,
+ bool &needsFree) {
// Create init block with arguments: original value + bounds
SmallVector<Type> argTypes{varType};
SmallVector<Location> argLocs{loc};
@@ -1044,9 +1044,9 @@ static std::unique_ptr<Block> createInitRegion(OpBuilder &builder, Location loc,
argLocs.push_back(loc);
}
- auto initBlock = std::make_unique<Block>();
+ Block *initBlock = builder.createBlock(&initRegion);
initBlock->addArguments(argTypes, argLocs);
- builder.setInsertionPointToStart(initBlock.get());
+ builder.setInsertionPointToStart(initBlock);
Value privatizedValue;
@@ -1060,7 +1060,7 @@ static std::unique_ptr<Block> createInitRegion(OpBuilder &builder, Location loc,
privatizedValue = mappableTy.generatePrivateInit(
builder, loc, typedVar, varName, bounds, {}, needsFree);
if (!privatizedValue)
- return nullptr;
+ return failure();
} else {
assert(isa<PointerLikeType>(varType) && "Expected PointerLikeType");
auto pointerLikeTy = cast<PointerLikeType>(varType);
@@ -1068,21 +1068,21 @@ static std::unique_ptr<Block> createInitRegion(OpBuilder &builder, Location loc,
privatizedValue = pointerLikeTy.genAllocate(builder, loc, varName, varType,
blockArgVar, needsFree);
if (!privatizedValue)
- return nullptr;
+ return failure();
}
// Add yield operation to init block
acc::YieldOp::create(builder, loc, privatizedValue);
- return initBlock;
+ return success();
}
/// Create and populate a copy region for firstprivate recipes.
-/// Returns the copy block on success, or nullptr on failure.
+/// Returns success if the region is populated, failure otherwise.
/// TODO: Handle MappableType - it does not yet have a copy API.
-static std::unique_ptr<Block> createCopyRegion(OpBuilder &builder, Location loc,
- Type varType,
- ValueRange bounds) {
+static LogicalResult createCopyRegion(OpBuilder &builder, Location loc,
+ Region &copyRegion, Type varType,
+ ValueRange bounds) {
// Create copy block with arguments: original value + privatized value +
// bounds
SmallVector<Type> copyArgTypes{varType, varType};
@@ -1092,16 +1092,16 @@ static std::unique_ptr<Block> createCopyRegion(OpBuilder &builder, Location loc,
copyArgLocs.push_back(loc);
}
- auto copyBlock = std::make_unique<Block>();
+ Block *copyBlock = builder.createBlock(&copyRegion);
copyBlock->addArguments(copyArgTypes, copyArgLocs);
- builder.setInsertionPointToStart(copyBlock.get());
+ builder.setInsertionPointToStart(copyBlock);
bool isMappable = isa<MappableType>(varType);
bool isPointerLike = isa<PointerLikeType>(varType);
// TODO: Handle MappableType - it does not yet have a copy API.
// Otherwise, for now just fallback to pointer-like behavior.
if (isMappable && !isPointerLike)
- return nullptr;
+ return failure();
// Generate copy region body based on variable type
if (isPointerLike) {
@@ -1113,21 +1113,20 @@ static std::unique_ptr<Block> createCopyRegion(OpBuilder &builder, Location loc,
if (!pointerLikeTy.genCopy(
builder, loc, cast<TypedValue<PointerLikeType>>(privatizedArg),
cast<TypedValue<PointerLikeType>>(originalArg), varType))
- return nullptr;
+ return failure();
}
// Add terminator to copy block
acc::TerminatorOp::create(builder, loc);
- return copyBlock;
+ return success();
}
/// Create and populate a destroy region for privatization recipes.
-/// Returns the destroy block on success, or nullptr if not needed.
-static std::unique_ptr<Block> createDestroyRegion(OpBuilder &builder,
- Location loc, Type varType,
- Value allocRes,
- ValueRange bounds) {
+/// Returns success if the region is populated, failure otherwise.
+static LogicalResult createDestroyRegion(OpBuilder &builder, Location loc,
+ Region &destroyRegion, Type varType,
+ Value allocRes, ValueRange bounds) {
// Create destroy block with arguments: original value + privatized value +
// bounds
SmallVector<Type> destroyArgTypes{varType, varType};
@@ -1137,28 +1136,25 @@ static std::unique_ptr<Block> createDestroyRegion(OpBuilder &builder,
destroyArgLocs.push_back(loc);
}
- auto destroyBlock = std::make_unique<Block>();
+ Block *destroyBlock = builder.createBlock(&destroyRegion);
destroyBlock->addArguments(destroyArgTypes, destroyArgLocs);
- builder.setInsertionPointToStart(destroyBlock.get());
+ builder.setInsertionPointToStart(destroyBlock);
- bool isMappable = isa<MappableType>(varType);
- bool isPointerLike = isa<PointerLikeType>(varType);
- // TODO: Handle MappableType - it does not yet have a deallocation API.
- // Otherwise, for now just fallback to pointer-like behavior.
- if (isMappable && !isPointerLike)
- return nullptr;
-
- assert(isa<PointerLikeType>(varType) && "Expected PointerLikeType");
- auto pointerLikeTy = cast<PointerLikeType>(varType);
- auto privatizedArg =
+ auto varToFree =
cast<TypedValue<PointerLikeType>>(destroyBlock->getArgument(1));
- // Pass allocRes to help determine the allocation type
- if (!pointerLikeTy.genFree(builder, loc, privatizedArg, allocRes, varType))
- return nullptr;
+ if (isa<MappableType>(varType)) {
+ auto mappableTy = cast<MappableType>(varType);
+ if (!mappableTy.generatePrivateDestroy(builder, loc, varToFree))
+ return failure();
+ } else {
+ assert(isa<PointerLikeType>(varType) && "Expected PointerLikeType");
+ auto pointerLikeTy = cast<PointerLikeType>(varType);
+ if (!pointerLikeTy.genFree(builder, loc, varToFree, allocRes, varType))
+ return failure();
+ }
acc::TerminatorOp::create(builder, loc);
-
- return destroyBlock;
+ return success();
}
} // namespace
@@ -1220,40 +1216,33 @@ PrivateRecipeOp::createAndPopulate(OpBuilder &builder, Location loc,
if (!isMappable && !isPointerLike)
return std::nullopt;
- // Create init and destroy blocks using shared helpers
OpBuilder::InsertionGuard guard(builder);
- // Save the original insertion point for creating the recipe operation later
- auto originalInsertionPoint = builder.saveInsertionPoint();
+ // Create the recipe operation first so regions have proper parent context
+ auto recipe = PrivateRecipeOp::create(builder, loc, recipeName, varType);
+ // Populate the init region
bool needsFree = false;
- auto initBlock =
- createInitRegion(builder, loc, varType, varName, bounds, needsFree);
- if (!initBlock)
+ if (failed(createInitRegion(builder, loc, recipe.getInitRegion(), varType,
+ varName, bounds, needsFree))) {
+ recipe.erase();
return std::nullopt;
+ }
// Only create destroy region if the allocation needs deallocation
- std::unique_ptr<Block> destroyBlock;
if (needsFree) {
// Extract the allocated value from the init block's yield operation
- auto yieldOp = cast<acc::YieldOp>(initBlock->getTerminator());
+ auto yieldOp =
+ cast<acc::YieldOp>(recipe.getInitRegion().front().getTerminator());
Value allocRes = yieldOp.getOperand(0);
- destroyBlock = createDestroyRegion(builder, loc, varType, allocRes, bounds);
- if (!destroyBlock)
+ if (failed(createDestroyRegion(builder, loc, recipe.getDestroyRegion(),
+ varType, allocRes, bounds))) {
+ recipe.erase();
return std::nullopt;
+ }
}
- // Now create the recipe operation at the original insertion point and attach
- // the blocks
- builder.restoreInsertionPoint(originalInsertionPoint);
- auto recipe = PrivateRecipeOp::create(builder, loc, recipeName, varType);
-
- // Move the blocks into the recipe's regions
- recipe.getInitRegion().push_back(initBlock.release());
- if (destroyBlock)
- recipe.getDestroyRegion().push_back(destroyBlock.release());
-
return recipe;
}
@@ -1299,45 +1288,40 @@ FirstprivateRecipeOp::createAndPopulate(OpBuilder &builder, Location loc,
if (!isMappable && !isPointerLike)
return std::nullopt;
- // Create init, copy, and destroy blocks using shared helpers
OpBuilder::InsertionGuard guard(builder);
- // Save the original insertion point for creating the recipe operation later
- auto originalInsertionPoint = builder.saveInsertionPoint();
+ // Create the recipe operation first so regions have proper parent context
+ auto recipe = FirstprivateRecipeOp::create(builder, loc, recipeName, varType);
+ // Populate the init region
bool needsFree = false;
- auto initBlock =
- createInitRegion(builder, loc, varType, varName, bounds, needsFree);
- if (!initBlock)
+ if (failed(createInitRegion(builder, loc, recipe.getInitRegion(), varType,
+ varName, bounds, needsFree))) {
+ recipe.erase();
return std::nullopt;
+ }
- auto copyBlock = createCopyRegion(builder, loc, varType, bounds);
- if (!copyBlock)
+ // Populate the copy region
+ if (failed(createCopyRegion(builder, loc, recipe.getCopyRegion(), varType,
+ bounds))) {
+ recipe.erase();
return std::nullopt;
+ }
// Only create destroy region if the allocation needs deallocation
- std::unique_ptr<Block> destroyBlock;
if (needsFree) {
// Extract the allocated value from the init block's yield operation
- auto yieldOp = cast<acc::YieldOp>(initBlock->getTerminator());
+ auto yieldOp =
+ cast<acc::YieldOp>(recipe.getInitRegion().front().getTerminator());
Value allocRes = yieldOp.getOperand(0);
- destroyBlock = createDestroyRegion(builder, loc, varType, allocRes, bounds);
- if (!destroyBlock)
+ if (failed(createDestroyRegion(builder, loc, recipe.getDestroyRegion(),
+ varType, allocRes, bounds))) {
+ recipe.erase();
return std::nullopt;
+ }
}
- // Now create the recipe operation at the original insertion point and attach
- // the blocks
- builder.restoreInsertionPoint(originalInsertionPoint);
- auto recipe = FirstprivateRecipeOp::create(builder, loc, recipeName, varType);
-
- // Move the blocks into the recipe's regions
- recipe.getInitRegion().push_back(initBlock.release());
- recipe.getCopyRegion().push_back(copyBlock.release());
- if (destroyBlock)
- recipe.getDestroyRegion().push_back(destroyBlock.release());
-
return recipe;
}
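A minimal caller-side sketch of the reworked API; the optional return type and the trailing parameters are assumptions inferred from the hunks above, not the definitive signature:

// Hedged usage sketch: createAndPopulate now builds the recipe op and fills
// its regions in place; if any region cannot be populated it erases the op
// and returns std::nullopt, so the caller has nothing to clean up.
std::optional<acc::PrivateRecipeOp> recipe =
    acc::PrivateRecipeOp::createAndPopulate(builder, loc, "acc_private_f32",
                                            varType, "v", /*bounds=*/{});
if (!recipe)
  return failure(); // The partially built recipe op has already been erased.
usePrivateRecipe(*recipe); // Illustrative placeholder for the caller's logic.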
diff --git a/mlir/lib/Dialect/Transform/SMTExtension/SMTExtensionOps.cpp b/mlir/lib/Dialect/Transform/SMTExtension/SMTExtensionOps.cpp
index 8e7af05..abc1316 100644
--- a/mlir/lib/Dialect/Transform/SMTExtension/SMTExtensionOps.cpp
+++ b/mlir/lib/Dialect/Transform/SMTExtension/SMTExtensionOps.cpp
@@ -8,8 +8,8 @@
#include "mlir/Dialect/Transform/SMTExtension/SMTExtensionOps.h"
#include "mlir/Dialect/SMT/IR/SMTDialect.h"
-#include "mlir/Dialect/Transform/IR/TransformOps.h"
-#include "mlir/Dialect/Transform/SMTExtension/SMTExtension.h"
+#include "mlir/Dialect/SMT/IR/SMTOps.h"
+#include "mlir/Dialect/Transform/IR/TransformTypes.h"
using namespace mlir;
@@ -23,6 +23,7 @@ using namespace mlir;
void transform::smt::ConstrainParamsOp::getEffects(
SmallVectorImpl<MemoryEffects::EffectInstance> &effects) {
onlyReadsHandle(getParamsMutable(), effects);
+ producesHandle(getResults(), effects);
}
DiagnosedSilenceableFailure
@@ -37,19 +38,95 @@ transform::smt::ConstrainParamsOp::apply(transform::TransformRewriter &rewriter,
// and allow for users to attach their own implementation, which would,
// e.g., translate the ops to SMTLIB and hand that over to the user's
// favourite solver. This requires changes to the dialect's verifier.
- return emitDefiniteFailure() << "op does not have interpreted semantics yet";
+ return emitSilenceableFailure(getLoc())
+ << "op does not have interpreted semantics yet";
}
LogicalResult transform::smt::ConstrainParamsOp::verify() {
+ auto yieldTerminator =
+ dyn_cast<mlir::smt::YieldOp>(getRegion().front().back());
+ if (!yieldTerminator)
+ return emitOpError() << "expected '"
+ << mlir::smt::YieldOp::getOperationName()
+ << "' as terminator";
+
+ auto checkTypes = [](size_t idx, Type smtType, StringRef smtDesc,
+ Type paramType, StringRef paramDesc,
+ auto *atOp) -> InFlightDiagnostic {
+ if (!isa<mlir::smt::BoolType, mlir::smt::IntType, mlir::smt::BitVectorType>(
+ smtType))
+ return atOp->emitOpError() << "the type of " << smtDesc << " #" << idx
+ << " is expected to be either a !smt.bool, a "
+ "!smt.int, or a !smt.bv";
+
+ assert(isa<TransformParamTypeInterface>(paramType) &&
+ "ODS specifies params' type should implement param interface");
+ if (isa<transform::AnyParamType>(paramType))
+ return {}; // No further checks can be done.
+
+ // NB: This cast must succeed as long as the only implementors of
+ // TransformParamTypeInterface are AnyParamType and ParamType.
+ Type typeWrappedByParam = cast<ParamType>(paramType).getType();
+
+ if (isa<mlir::smt::IntType>(smtType)) {
+ if (!isa<IntegerType>(typeWrappedByParam))
+ return atOp->emitOpError()
+ << "the type of " << smtDesc << " #" << idx
+ << " is !smt.int though the corresponding " << paramDesc
+ << " type (" << paramType << ") is not wrapping an integer type";
+ } else if (isa<mlir::smt::BoolType>(smtType)) {
+ auto wrappedIntType = dyn_cast<IntegerType>(typeWrappedByParam);
+ if (!wrappedIntType || wrappedIntType.getWidth() != 1)
+ return atOp->emitOpError()
+ << "the type of " << smtDesc << " #" << idx
+ << " is !smt.bool though the corresponding " << paramDesc
+ << " type (" << paramType << ") is not wrapping i1";
+ } else if (auto bvSmtType = dyn_cast<mlir::smt::BitVectorType>(smtType)) {
+ auto wrappedIntType = dyn_cast<IntegerType>(typeWrappedByParam);
+ if (!wrappedIntType || wrappedIntType.getWidth() != bvSmtType.getWidth())
+ return atOp->emitOpError()
+ << "the type of " << smtDesc << " #" << idx << " is " << smtType
+ << " though the corresponding " << paramDesc << " type ("
+ << paramType
+ << ") is not wrapping an integer type of the same bitwidth";
+ }
+
+ return {};
+ };
+
if (getOperands().size() != getBody().getNumArguments())
return emitOpError(
"must have the same number of block arguments as operands");
+ for (auto [idx, operandType, blockArgType] :
+ llvm::enumerate(getOperandTypes(), getBody().getArgumentTypes())) {
+ InFlightDiagnostic typeCheckResult =
+ checkTypes(idx, blockArgType, "block arg", operandType, "operand",
+ /*atOp=*/this);
+ if (LogicalResult(typeCheckResult).failed())
+ return typeCheckResult;
+ }
+
for (auto &op : getBody().getOps()) {
if (!isa<mlir::smt::SMTDialect>(op.getDialect()))
return emitOpError(
"ops contained in region should belong to SMT-dialect");
}
+ if (yieldTerminator->getNumOperands() != getNumResults())
+ return yieldTerminator.emitOpError()
+ << "expected terminator to have as many operands as the parent op "
+ "has results";
+
+ for (auto [idx, termOperandType, resultType] : llvm::enumerate(
+ yieldTerminator->getOperands().getType(), getResultTypes())) {
+ InFlightDiagnostic typeCheckResult =
+ checkTypes(idx, termOperandType, "terminator operand",
+ cast<transform::ParamType>(resultType), "result",
+ /*atOp=*/&yieldTerminator);
+ if (LogicalResult(typeCheckResult).failed())
+ return typeCheckResult;
+ }
+
return success();
}
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp
index 12e6475..7c019e7 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp
@@ -2032,11 +2032,19 @@ struct WarpOpScfForOp : public WarpDistributionPattern {
}
// Newly created `WarpOp` will yield values in following order:
- // 1. All init args of the `ForOp`.
- // 2. All escaping values.
- // 3. All non-`ForOp` yielded values.
+ // 1. Loop bounds.
+ // 2. All init args of the `ForOp`.
+ // 3. All escaping values.
+ // 4. All non-`ForOp` yielded values.
SmallVector<Value> newWarpOpYieldValues;
SmallVector<Type> newWarpOpDistTypes;
+ newWarpOpYieldValues.insert(
+ newWarpOpYieldValues.end(),
+ {forOp.getLowerBound(), forOp.getUpperBound(), forOp.getStep()});
+ newWarpOpDistTypes.insert(newWarpOpDistTypes.end(),
+ {forOp.getLowerBound().getType(),
+ forOp.getUpperBound().getType(),
+ forOp.getStep().getType()});
for (auto [i, initArg] : llvm::enumerate(forOp.getInitArgs())) {
newWarpOpYieldValues.push_back(initArg);
// Compute the distributed type for this init arg.
@@ -2072,20 +2080,24 @@ struct WarpOpScfForOp : public WarpDistributionPattern {
// Next, we create a new `ForOp` with the init args yielded by the new
// `WarpOp`.
+ const unsigned initArgsStartIdx = 3; // After loop bounds.
const unsigned escapingValuesStartIdx =
+ initArgsStartIdx +
forOp.getInitArgs().size(); // `ForOp` init args are positioned before
// escaping values in the new `WarpOp`.
SmallVector<Value> newForOpOperands;
- for (size_t i = 0; i < escapingValuesStartIdx; ++i)
+ for (size_t i = initArgsStartIdx; i < escapingValuesStartIdx; ++i)
newForOpOperands.push_back(newWarpOp.getResult(newIndices[i]));
// Create a new `ForOp` outside the new `WarpOp` region.
OpBuilder::InsertionGuard g(rewriter);
rewriter.setInsertionPointAfter(newWarpOp);
auto newForOp = scf::ForOp::create(
- rewriter, forOp.getLoc(), forOp.getLowerBound(), forOp.getUpperBound(),
- forOp.getStep(), newForOpOperands, /*bodyBuilder=*/nullptr,
- forOp.getUnsignedCmp());
+ rewriter, forOp.getLoc(),
+ /*lowerBound=*/newWarpOp.getResult(newIndices[0]),
+ /*upperBound=*/newWarpOp.getResult(newIndices[1]),
+ /*step=*/newWarpOp.getResult(newIndices[2]), newForOpOperands,
+ /*bodyBuilder=*/nullptr, forOp.getUnsignedCmp());
// Next, we insert a new `WarpOp` (called inner `WarpOp`) inside the
// newly created `ForOp`. This `WarpOp` will contain all ops that were
// contained within the original `ForOp` body.
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp
index 1b656d8..ea93085 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp
@@ -817,6 +817,50 @@ struct LinearizeVectorToElements final
}
};
+/// Convert broadcasts from scalars or 1-element vectors, such as
+///
+/// ```mlir
+/// vector.broadcast %value : f32 to vector<4x4xf32>
+/// ```
+///
+/// to broadcasts to rank-1 vectors, with shape_casts before/after as needed.
+/// The above becomes,
+///
+/// ```mlir
+/// %out_1d = vector.broadcast %value : f32 to vector<16xf32>
+/// %out_nd = vector.shape_cast %out_1d : vector<16xf32> to vector<4x4xf32>
+/// ```
+struct LinearizeVectorBroadcast final
+ : public OpConversionPattern<vector::BroadcastOp> {
+ using Base::Base;
+
+ LinearizeVectorBroadcast(const TypeConverter &typeConverter,
+ MLIRContext *context, PatternBenefit benefit = 1)
+ : OpConversionPattern(typeConverter, context, benefit) {}
+
+ LogicalResult
+ matchAndRewrite(vector::BroadcastOp broadcastOp, OpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const override {
+
+ int numElements = 1;
+ Type sourceType = broadcastOp.getSourceType();
+ if (auto vecType = dyn_cast<VectorType>(sourceType)) {
+ numElements = vecType.getNumElements();
+ }
+
+ if (numElements != 1) {
+ return rewriter.notifyMatchFailure(
+ broadcastOp, "only broadcasts of single elements can be linearized.");
+ }
+
+ auto dstTy = getTypeConverter()->convertType(broadcastOp.getType());
+ rewriter.replaceOpWithNewOp<vector::BroadcastOp>(broadcastOp, dstTy,
+ adaptor.getSource());
+
+ return success();
+ }
+};
+
} // namespace
/// This method defines the set of operations that are linearizable, and hence
@@ -909,8 +953,8 @@ void mlir::vector::populateVectorLinearizeBasePatterns(
patterns
.add<LinearizeConstantLike, LinearizeVectorizable, LinearizeVectorBitCast,
LinearizeVectorCreateMask, LinearizeVectorLoad, LinearizeVectorStore,
- LinearizeVectorFromElements, LinearizeVectorToElements>(
- typeConverter, patterns.getContext());
+ LinearizeVectorBroadcast, LinearizeVectorFromElements,
+ LinearizeVectorToElements>(typeConverter, patterns.getContext());
}
void mlir::vector::populateVectorLinearizeShuffleLikeOpsPatterns(
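For orientation, a pass-body sketch (assumed to live in an OperationPass subclass) of how these base linearization patterns, now including LinearizeVectorBroadcast, are typically driven; the converter rules and target legality are assumptions, not taken from this patch:

void runOnOperation() {
  MLIRContext &ctx = getContext();
  // Assumed: converter rules and legality callbacks that flatten n-D vector
  // types to 1-D, as the in-tree linearization test pass configures them.
  TypeConverter converter;
  ConversionTarget target(ctx);
  RewritePatternSet patterns(&ctx);
  vector::populateVectorLinearizeBasePatterns(converter, patterns);
  if (failed(applyPartialConversion(getOperation(), target,
                                    std::move(patterns))))
    signalPassFailure();
}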
diff --git a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
index 025ee9a..c809c502 100644
--- a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
+++ b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
@@ -91,7 +91,7 @@ mlir::vector::isTranspose2DSlice(vector::TransposeOp op) {
// Check whether the two source vector dimensions that are greater than one
// must be transposed with each other so that we can apply one of the 2-D
- // transpose pattens. Otherwise, these patterns are not applicable.
+ // transpose patterns. Otherwise, these patterns are not applicable.
if (!areDimsTransposedIn2DSlice(srcGtOneDims[0], srcGtOneDims[1],
op.getPermutation()))
return failure();
diff --git a/mlir/lib/Dialect/WasmSSA/IR/WasmSSAOps.cpp b/mlir/lib/Dialect/WasmSSA/IR/WasmSSAOps.cpp
index 89b62a2..a514ea9 100644
--- a/mlir/lib/Dialect/WasmSSA/IR/WasmSSAOps.cpp
+++ b/mlir/lib/Dialect/WasmSSA/IR/WasmSSAOps.cpp
@@ -12,6 +12,7 @@
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
+#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/Region.h"
#include "mlir/IR/SymbolTable.h"
@@ -39,28 +40,6 @@ void printElseRegion(OpAsmPrinter &opPrinter, Operation *op,
opPrinter.printKeywordOrString("else ");
opPrinter.printRegion(elseRegion);
}
-
-ParseResult parseWasmVisibility(OpAsmParser &opParser, StringAttr &visibility) {
- std::string keyword;
- auto initLocation = opParser.getCurrentLocation();
- std::ignore = opParser.parseOptionalKeywordOrString(&keyword);
- if (keyword == "nested" or keyword == "") {
- visibility = StringAttr::get(opParser.getContext(), "nested");
- return ParseResult::success();
- }
-
- if (keyword == "public" || keyword == "private") {
- visibility = StringAttr::get(opParser.getContext(), keyword);
- return ParseResult::success();
- }
- opParser.emitError(initLocation, "expecting symbol visibility");
- return ParseResult::failure();
-}
-
-void printWasmVisibility(OpAsmPrinter &opPrinter, Operation *op,
- Attribute visibility) {
- opPrinter.printKeywordOrString(cast<StringAttr>(visibility).strref());
-}
} // namespace
#define GET_OP_CLASSES
@@ -167,10 +146,23 @@ Block *FuncOp::addEntryBlock() {
void FuncOp::build(OpBuilder &odsBuilder, OperationState &odsState,
StringRef symbol, FunctionType funcType) {
- FuncOp::build(odsBuilder, odsState, symbol, funcType, {}, {}, "nested");
+ FuncOp::build(odsBuilder, odsState, symbol, funcType, {}, {});
}
ParseResult FuncOp::parse(OpAsmParser &parser, OperationState &result) {
+ auto *ctx = parser.getContext();
+ std::string visibilityString;
+ auto loc = parser.getNameLoc();
+ ParseResult res = parser.parseOptionalKeywordOrString(&visibilityString);
+ bool exported{false};
+ if (res.succeeded()) {
+ if (visibilityString != "exported")
+ return parser.emitError(
+ loc, "expecting either `exported` or symbol name. got ")
+ << visibilityString;
+ exported = true;
+ }
+
auto buildFuncType = [&parser](Builder &builder, ArrayRef<Type> argTypes,
ArrayRef<Type> results,
function_interface_impl::VariadicFlag,
@@ -191,11 +183,13 @@ ParseResult FuncOp::parse(OpAsmParser &parser, OperationState &result) {
return builder.getFunctionType(argTypesWithoutLocal, results);
};
-
- return function_interface_impl::parseFunctionOp(
+ auto funcParseRes = function_interface_impl::parseFunctionOp(
parser, result, /*allowVariadic=*/false,
getFunctionTypeAttrName(result.name), buildFuncType,
getArgAttrsAttrName(result.name), getResAttrsAttrName(result.name));
+ if (exported)
+ result.addAttribute(getExportedAttrName(result.name), UnitAttr::get(ctx));
+ return funcParseRes;
}
LogicalResult FuncOp::verifyBody() {
@@ -224,9 +218,18 @@ LogicalResult FuncOp::verifyBody() {
}
void FuncOp::print(OpAsmPrinter &p) {
+ // If exported, print the `exported` keyword first and temporarily drop the
+ // attribute so the generic function printer does not print it again.
+ auto exported = getExported();
+ if (exported) {
+ p << " exported";
+ removeExportedAttr();
+ }
function_interface_impl::printFunctionOp(
p, *this, /*isVariadic=*/false, getFunctionTypeAttrName(),
getArgAttrsAttrName(), getResAttrsAttrName());
+ if (exported)
+ setExported(true);
}
//===----------------------------------------------------------------------===//
@@ -237,38 +240,37 @@ void FuncImportOp::build(OpBuilder &odsBuilder, OperationState &odsState,
StringRef symbol, StringRef moduleName,
StringRef importName, FunctionType type) {
FuncImportOp::build(odsBuilder, odsState, symbol, moduleName, importName,
- type, {}, {}, odsBuilder.getStringAttr("nested"));
+ type, {}, {});
}
//===----------------------------------------------------------------------===//
// GlobalOp
//===----------------------------------------------------------------------===//
-
-void GlobalOp::build(OpBuilder &odsBuilder, OperationState &odsState,
- StringRef symbol, Type type, bool isMutable) {
- GlobalOp::build(odsBuilder, odsState, symbol, type, isMutable,
- odsBuilder.getStringAttr("nested"));
-}
-
// Custom formats
ParseResult GlobalOp::parse(OpAsmParser &parser, OperationState &result) {
StringAttr symbolName;
Type globalType;
auto *ctx = parser.getContext();
- ParseResult res = parser.parseSymbolName(
- symbolName, SymbolTable::getSymbolAttrName(), result.attributes);
+ std::string visibilityString;
+ auto loc = parser.getNameLoc();
+ ParseResult res = parser.parseOptionalKeywordOrString(&visibilityString);
+ if (res.succeeded()) {
+ if (visibilityString != "exported")
+ return parser.emitError(
+ loc, "expecting either `exported` or symbol name. got ")
+ << visibilityString;
+ result.addAttribute(getExportedAttrName(result.name), UnitAttr::get(ctx));
+ }
+ res = parser.parseSymbolName(symbolName, SymbolTable::getSymbolAttrName(),
+ result.attributes);
res = parser.parseType(globalType);
result.addAttribute(getTypeAttrName(result.name), TypeAttr::get(globalType));
std::string mutableString;
res = parser.parseOptionalKeywordOrString(&mutableString);
if (res.succeeded() && mutableString == "mutable")
result.addAttribute("isMutable", UnitAttr::get(ctx));
- std::string visibilityString;
- res = parser.parseOptionalKeywordOrString(&visibilityString);
- if (res.succeeded())
- result.addAttribute("sym_visibility",
- StringAttr::get(ctx, visibilityString));
+
res = parser.parseColon();
Region *globalInitRegion = result.addRegion();
res = parser.parseRegion(*globalInitRegion);
@@ -276,11 +278,11 @@ ParseResult GlobalOp::parse(OpAsmParser &parser, OperationState &result) {
}
void GlobalOp::print(OpAsmPrinter &printer) {
+ if (getExported())
+ printer << " exported";
printer << " @" << getSymName().str() << " " << getType();
if (getIsMutable())
printer << " mutable";
- if (auto vis = getSymVisibility())
- printer << " " << *vis;
printer << " :";
Region &body = getRegion();
if (!body.empty()) {
@@ -319,13 +321,6 @@ GlobalGetOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
// GlobalImportOp
//===----------------------------------------------------------------------===//
-void GlobalImportOp::build(OpBuilder &odsBuilder, OperationState &odsState,
- StringRef symbol, StringRef moduleName,
- StringRef importName, Type type, bool isMutable) {
- GlobalImportOp::build(odsBuilder, odsState, symbol, moduleName, importName,
- type, isMutable, odsBuilder.getStringAttr("nested"));
-}
-
ParseResult GlobalImportOp::parse(OpAsmParser &parser, OperationState &result) {
auto *ctx = parser.getContext();
ParseResult res = parseImportOp(parser, result);
@@ -335,12 +330,8 @@ ParseResult GlobalImportOp::parse(OpAsmParser &parser, OperationState &result) {
res = parser.parseOptionalKeywordOrString(&mutableOrSymVisString);
if (res.succeeded() && mutableOrSymVisString == "mutable") {
result.addAttribute("isMutable", UnitAttr::get(ctx));
- res = parser.parseOptionalKeywordOrString(&mutableOrSymVisString);
}
- if (res.succeeded())
- result.addAttribute("sym_visibility",
- StringAttr::get(ctx, mutableOrSymVisString));
res = parser.parseColon();
Type importedType;
@@ -356,8 +347,6 @@ void GlobalImportOp::print(OpAsmPrinter &printer) {
<< "\" as @" << getSymName();
if (getIsMutable())
printer << " mutable";
- if (auto vis = getSymVisibility())
- printer << " " << *vis;
printer << " : " << getType();
}
@@ -431,27 +420,6 @@ LogicalResult LocalTeeOp::verify() {
Block *LoopOp::getLabelTarget() { return &getBody().front(); }
//===----------------------------------------------------------------------===//
-// MemOp
-//===----------------------------------------------------------------------===//
-
-void MemOp::build(OpBuilder &odsBuilder, OperationState &odsState,
- StringRef symbol, LimitType limit) {
- MemOp::build(odsBuilder, odsState, symbol, limit,
- odsBuilder.getStringAttr("nested"));
-}
-
-//===----------------------------------------------------------------------===//
-// MemImportOp
-//===----------------------------------------------------------------------===//
-
-void MemImportOp::build(OpBuilder &odsBuilder, OperationState &odsState,
- StringRef symbol, StringRef moduleName,
- StringRef importName, LimitType limits) {
- MemImportOp::build(odsBuilder, odsState, symbol, moduleName, importName,
- limits, odsBuilder.getStringAttr("nested"));
-}
-
-//===----------------------------------------------------------------------===//
// ReinterpretOp
//===----------------------------------------------------------------------===//
@@ -471,24 +439,3 @@ LogicalResult ReinterpretOp::verify() {
//===----------------------------------------------------------------------===//
void ReturnOp::build(OpBuilder &odsBuilder, OperationState &odsState) {}
-
-//===----------------------------------------------------------------------===//
-// TableOp
-//===----------------------------------------------------------------------===//
-
-void TableOp::build(OpBuilder &odsBuilder, OperationState &odsState,
- StringRef symbol, TableType type) {
- TableOp::build(odsBuilder, odsState, symbol, type,
- odsBuilder.getStringAttr("nested"));
-}
-
-//===----------------------------------------------------------------------===//
-// TableImportOp
-//===----------------------------------------------------------------------===//
-
-void TableImportOp::build(OpBuilder &odsBuilder, OperationState &odsState,
- StringRef symbol, StringRef moduleName,
- StringRef importName, TableType type) {
- TableImportOp::build(odsBuilder, odsState, symbol, moduleName, importName,
- type, odsBuilder.getStringAttr("nested"));
-}
diff --git a/mlir/lib/RegisterAllPasses.cpp b/mlir/lib/RegisterAllPasses.cpp
index c67b242..dd413d2de 100644
--- a/mlir/lib/RegisterAllPasses.cpp
+++ b/mlir/lib/RegisterAllPasses.cpp
@@ -98,4 +98,5 @@ void mlir::registerAllPasses() {
sparse_tensor::registerSparseTensorPipelines();
tosa::registerTosaToLinalgPipelines();
gpu::registerGPUToNVVMPipeline();
+ gpu::registerGPUToXeVMPipeline();
}
diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
index 1e2099d..8de49dd 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
@@ -246,7 +246,7 @@ public:
// Rewrite all uses of the original variable in `BBName`
// with the linear variable in-place
- void rewriteInPlace(llvm::IRBuilderBase &builder, std::string BBName,
+ void rewriteInPlace(llvm::IRBuilderBase &builder, const std::string &BBName,
size_t varIndex) {
llvm::SmallVector<llvm::User *> users;
for (llvm::User *user : linearOrigVal[varIndex]->users())
diff --git a/mlir/lib/Target/SPIRV/Deserialization/Deserializer.cpp b/mlir/lib/Target/SPIRV/Deserialization/Deserializer.cpp
index 0c3e87a..d9ad8fb 100644
--- a/mlir/lib/Target/SPIRV/Deserialization/Deserializer.cpp
+++ b/mlir/lib/Target/SPIRV/Deserialization/Deserializer.cpp
@@ -2619,6 +2619,11 @@ LogicalResult ControlFlowStructurizer::structurize() {
// region. We cannot handle such cases given that once a value is sinked into
// the SelectionOp/LoopOp's region, there is no escape for it.
for (auto *block : constructBlocks) {
+ if (!block->use_empty())
+ return emitError(block->getParent()->getLoc(),
+ "failed control flow structurization: "
+ "block has uses outside of the "
+ "enclosing selection/loop construct");
for (Operation &op : *block)
if (!op.use_empty())
return op.emitOpError("failed control flow structurization: value has "
diff --git a/mlir/lib/Target/Wasm/TranslateFromWasm.cpp b/mlir/lib/Target/Wasm/TranslateFromWasm.cpp
index 51c6077..366ba8f 100644
--- a/mlir/lib/Target/Wasm/TranslateFromWasm.cpp
+++ b/mlir/lib/Target/Wasm/TranslateFromWasm.cpp
@@ -14,6 +14,7 @@
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributeInterfaces.h"
+#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Location.h"
#include "mlir/Support/LLVM.h"
@@ -138,6 +139,10 @@ using ImportDesc =
using parsed_inst_t = FailureOr<SmallVector<Value>>;
+struct EmptyBlockMarker {};
+using BlockTypeParseResult =
+ std::variant<EmptyBlockMarker, TypeIdxRecord, Type>;
+
struct WasmModuleSymbolTables {
SmallVector<FunctionSymbolRefContainer> funcSymbols;
SmallVector<GlobalSymbolRefContainer> globalSymbols;
@@ -175,6 +180,9 @@ class ParserHead;
/// Wrapper around SmallVector to only allow access as push and pop on the
/// stack. Makes sure that there are no "free accesses" on the stack to preserve
/// its state.
+/// This class also keeps track of the Wasm labels defined by different ops,
+/// which can be targeted by control flow ops. This can be modeled as part of
+/// the Value Stack as Wasm control flow ops can only target enclosing labels.
class ValueStack {
private:
struct LabelLevel {
@@ -206,6 +214,16 @@ public:
/// if an error occurs.
LogicalResult pushResults(ValueRange results, Location *opLoc);
+ void addLabelLevel(LabelLevelOpInterface levelOp) {
+ labelLevel.push_back({values.size(), levelOp});
+ LDBG() << "Adding a new frame context to ValueStack";
+ }
+
+ void dropLabelLevel() {
+ assert(!labelLevel.empty() && "Trying to drop a frame from empty context");
+ auto newSize = labelLevel.pop_back_val().stackIdx;
+ values.truncate(newSize);
+ }
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// A simple dump function for debugging.
/// Writes output to llvm::dbgs().
@@ -214,6 +232,7 @@ public:
private:
SmallVector<Value> values;
+ SmallVector<LabelLevel> labelLevel;
};
using local_val_t = TypedValue<wasmssa::LocalRefType>;
@@ -248,6 +267,19 @@ private:
buildNumericOp(OpBuilder &builder,
std::enable_if_t<std::is_arithmetic_v<valueType>> * = nullptr);
+ /// Construct a conversion operation of type \p opType that takes a value of
+ /// type \p inputType from the stack and produces a value of type
+ /// \p outputType.
+ ///
+ /// \p opType - The WASM dialect operation to build.
+ /// \p inputType - The operand type for the built instruction.
+ /// \p outputType - The result type for the built instruction.
+ ///
+ /// \returns The parsed instruction result, or failure.
+ template <typename opType, typename inputType, typename outputType,
+ typename... extraArgsT>
+ inline parsed_inst_t buildConvertOp(OpBuilder &builder, extraArgsT...);
+
/// This function generates a dispatch tree to associate an opcode with a
/// parser. Parsers are registered by specialising the
/// `parseSpecificInstruction` function for the op code to handle.
@@ -280,11 +312,105 @@ private:
}
}
+ ///
+ /// RAII guard class for creating a nesting level
+ ///
+ struct NestingContextGuard {
+ NestingContextGuard(ExpressionParser &parser, LabelLevelOpInterface levelOp)
+ : parser{parser} {
+ parser.addNestingContextLevel(levelOp);
+ }
+ NestingContextGuard(NestingContextGuard &&other) : parser{other.parser} {
+ other.shouldDropOnDestruct = false;
+ }
+ NestingContextGuard(NestingContextGuard const &) = delete;
+ ~NestingContextGuard() {
+ if (shouldDropOnDestruct)
+ parser.dropNestingContextLevel();
+ }
+ ExpressionParser &parser;
+ bool shouldDropOnDestruct = true;
+ };
+
+ void addNestingContextLevel(LabelLevelOpInterface levelOp) {
+ valueStack.addLabelLevel(levelOp);
+ }
+
+ void dropNestingContextLevel() {
+ // Should always succeed as we are dropping the frame that was previously
+ // created.
+ valueStack.dropLabelLevel();
+ }
+
+ llvm::FailureOr<FunctionType> getFuncTypeFor(OpBuilder &builder,
+ EmptyBlockMarker) {
+ return builder.getFunctionType({}, {});
+ }
+
+ llvm::FailureOr<FunctionType> getFuncTypeFor(OpBuilder &builder,
+ TypeIdxRecord type) {
+ if (type.id >= symbols.moduleFuncTypes.size())
+ return emitError(*currentOpLoc,
+ "type index references nonexistent type (")
+ << type.id << "). Only " << symbols.moduleFuncTypes.size()
+ << " types are registered";
+ return symbols.moduleFuncTypes[type.id];
+ }
+
+ llvm::FailureOr<FunctionType> getFuncTypeFor(OpBuilder &builder,
+ Type valType) {
+ return builder.getFunctionType({}, {valType});
+ }
+
+ llvm::FailureOr<FunctionType>
+ getFuncTypeFor(OpBuilder &builder, BlockTypeParseResult parseResult) {
+ return std::visit(
+ [this, &builder](auto value) { return getFuncTypeFor(builder, value); },
+ parseResult);
+ }
+
+ llvm::FailureOr<FunctionType>
+ getFuncTypeFor(OpBuilder &builder,
+ llvm::FailureOr<BlockTypeParseResult> parseResult) {
+ if (llvm::failed(parseResult))
+ return failure();
+ return getFuncTypeFor(builder, *parseResult);
+ }
+
+ llvm::FailureOr<FunctionType> parseBlockFuncType(OpBuilder &builder);
+
struct ParseResultWithInfo {
SmallVector<Value> opResults;
std::byte endingByte;
};
+ /// @param blockToFill: the block whose content will be populated
+ /// @param resTypes: the types that this block is supposed to return
+ template <typename FilterT = ByteSequence<WasmBinaryEncoding::endByte>>
+ llvm::FailureOr<std::byte>
+ parseBlockContent(OpBuilder &builder, Block *blockToFill, TypeRange resTypes,
+ Location opLoc, LabelLevelOpInterface levelOp,
+ FilterT parseEndBytes = {}) {
+ OpBuilder::InsertionGuard guard{builder};
+ builder.setInsertionPointToStart(blockToFill);
+ LDBG() << "parsing a block of type "
+ << builder.getFunctionType(blockToFill->getArgumentTypes(),
+ resTypes);
+ auto nC = addNesting(levelOp);
+
+ if (failed(pushResults(blockToFill->getArguments())))
+ return failure();
+ auto bodyParsingRes = parse(builder, parseEndBytes);
+ if (failed(bodyParsingRes))
+ return failure();
+ auto returnOperands = popOperands(resTypes);
+ if (failed(returnOperands))
+ return failure();
+ builder.create<BlockReturnOp>(opLoc, *returnOperands);
+ LDBG() << "end of parsing of a block";
+ return bodyParsingRes->endingByte;
+ }
+
public:
template <std::byte ParseEndByte = WasmBinaryEncoding::endByte>
parsed_inst_t parse(OpBuilder &builder, UniqueByte<ParseEndByte> = {});
@@ -294,7 +420,11 @@ public:
parse(OpBuilder &builder,
ByteSequence<ExpressionParseEnd...> parsingEndFilters);
- FailureOr<SmallVector<Value>> popOperands(TypeRange operandTypes) {
+ NestingContextGuard addNesting(LabelLevelOpInterface levelOp) {
+ return NestingContextGuard{*this, levelOp};
+ }
+
+ FailureOr<llvm::SmallVector<Value>> popOperands(TypeRange operandTypes) {
return valueStack.popOperands(operandTypes, &currentOpLoc.value());
}
@@ -308,6 +438,12 @@ public:
template <typename OpToCreate>
parsed_inst_t parseSetOrTee(OpBuilder &);
+ /// Blocks and loops have a similar format and differ only in how their exit
+ /// is handled, which doesn't matter at parsing time, so both are parsed by
+ /// this single helper.
+ template <typename OpToCreate>
+ parsed_inst_t parseBlockLikeOp(OpBuilder &);
+
private:
std::optional<Location> currentOpLoc;
ParserHead &parser;
@@ -586,6 +722,29 @@ public:
return success();
}
+ llvm::FailureOr<BlockTypeParseResult> parseBlockType(MLIRContext *ctx) {
+ auto loc = getLocation();
+ auto blockIndicator = peek();
+ if (failed(blockIndicator))
+ return failure();
+ if (*blockIndicator == WasmBinaryEncoding::Type::emptyBlockType) {
+ offset += 1;
+ return {EmptyBlockMarker{}};
+ }
+ if (isValueOneOf(*blockIndicator, valueTypesEncodings))
+ return parseValueType(ctx);
+ /// Block type idx is a 32-bit positive integer encoded as a 33-bit signed
+ /// value.
+ auto typeIdx = parseI64();
+ if (failed(typeIdx))
+ return failure();
+ if (*typeIdx < 0 || *typeIdx > std::numeric_limits<uint32_t>::max())
+ return emitError(loc, "type ID should be representable with an unsigned "
+ "32 bits integer. Got ")
+ << *typeIdx;
+ return {TypeIdxRecord{static_cast<uint32_t>(*typeIdx)}};
+ }
+
bool end() const { return curHead().empty(); }
ParserHead copy() const { return *this; }
@@ -701,17 +860,41 @@ inline parsed_inst_t ExpressionParser::parseSpecificInstruction(OpBuilder &) {
void ValueStack::dump() const {
llvm::dbgs() << "================= Wasm ValueStack =======================\n";
llvm::dbgs() << "size: " << size() << "\n";
+ llvm::dbgs() << "nbFrames: " << labelLevel.size() << '\n';
llvm::dbgs() << "<Top>"
<< "\n";
// Stack is pushed to via push_back. Therefore the top of the stack is the
// end of the vector. Iterate in reverse so that the first thing we print
// is the top of the stack.
+ auto indexGetter = [this]() {
+ size_t idx = labelLevel.size();
+ return [this, idx]() mutable -> std::optional<std::pair<size_t, size_t>> {
+ llvm::dbgs() << "IDX: " << idx << '\n';
+ if (idx == 0)
+ return std::nullopt;
+ auto frameId = idx - 1;
+ auto frameLimit = labelLevel[frameId].stackIdx;
+ idx -= 1;
+ return {{frameId, frameLimit}};
+ };
+ };
+ auto getNextFrameIndex = indexGetter();
+ auto nextFrameIdx = getNextFrameIndex();
size_t stackSize = size();
- for (size_t idx = 0; idx < stackSize; idx++) {
+ for (size_t idx = 0; idx < stackSize; ++idx) {
size_t actualIdx = stackSize - 1 - idx;
+ while (nextFrameIdx && (nextFrameIdx->second > actualIdx)) {
+ llvm::dbgs() << " --------------- Frame (" << nextFrameIdx->first
+ << ")\n";
+ nextFrameIdx = getNextFrameIndex();
+ }
llvm::dbgs() << " ";
values[actualIdx].dump();
}
+ while (nextFrameIdx) {
+ llvm::dbgs() << " --------------- Frame (" << nextFrameIdx->first << ")\n";
+ nextFrameIdx = getNextFrameIndex();
+ }
llvm::dbgs() << "<Bottom>"
<< "\n";
llvm::dbgs() << "=========================================================\n";
@@ -726,7 +909,7 @@ parsed_inst_t ValueStack::popOperands(TypeRange operandTypes, Location *opLoc) {
return emitError(*opLoc,
"stack doesn't contain enough values. trying to get ")
<< operandTypes.size() << " operands on a stack containing only "
- << values.size() << " values.";
+ << values.size() << " values";
size_t stackIdxOffset = values.size() - operandTypes.size();
SmallVector<Value> res{};
res.reserve(operandTypes.size());
@@ -735,8 +918,7 @@ parsed_inst_t ValueStack::popOperands(TypeRange operandTypes, Location *opLoc) {
Type stackType = operand.getType();
if (stackType != operandTypes[i])
return emitError(*opLoc, "invalid operand type on stack. expecting ")
- << operandTypes[i] << ", value on stack is of type " << stackType
- << ".";
+ << operandTypes[i] << ", value on stack is of type " << stackType;
LDBG() << " POP: " << operand;
res.push_back(operand);
}
@@ -792,6 +974,151 @@ ExpressionParser::parse(OpBuilder &builder,
}
}
+llvm::FailureOr<FunctionType>
+ExpressionParser::parseBlockFuncType(OpBuilder &builder) {
+ return getFuncTypeFor(builder, parser.parseBlockType(builder.getContext()));
+}
+
+template <typename OpToCreate>
+parsed_inst_t ExpressionParser::parseBlockLikeOp(OpBuilder &builder) {
+ auto opLoc = currentOpLoc;
+ auto funcType = parseBlockFuncType(builder);
+ if (failed(funcType))
+ return failure();
+
+ auto inputTypes = funcType->getInputs();
+ auto inputOps = popOperands(inputTypes);
+ if (failed(inputOps))
+ return failure();
+
+ Block *curBlock = builder.getBlock();
+ Region *curRegion = curBlock->getParent();
+ auto resTypes = funcType->getResults();
+ llvm::SmallVector<Location> locations{};
+ locations.resize(resTypes.size(), *currentOpLoc);
+ auto *successor =
+ builder.createBlock(curRegion, curRegion->end(), resTypes, locations);
+ builder.setInsertionPointToEnd(curBlock);
+ auto blockOp =
+ builder.create<OpToCreate>(*currentOpLoc, *inputOps, successor);
+ auto *blockBody = blockOp.createBlock();
+ if (failed(parseBlockContent(builder, blockBody, resTypes, *opLoc, blockOp)))
+ return failure();
+ builder.setInsertionPointToStart(successor);
+ return {ValueRange{successor->getArguments()}};
+}
+
+template <>
+inline parsed_inst_t
+ExpressionParser::parseSpecificInstruction<WasmBinaryEncoding::OpCode::block>(
+ OpBuilder &builder) {
+ return parseBlockLikeOp<BlockOp>(builder);
+}
+
+template <>
+inline parsed_inst_t
+ExpressionParser::parseSpecificInstruction<WasmBinaryEncoding::OpCode::loop>(
+ OpBuilder &builder) {
+ return parseBlockLikeOp<LoopOp>(builder);
+}
+
+template <>
+inline parsed_inst_t ExpressionParser::parseSpecificInstruction<
+ WasmBinaryEncoding::OpCode::ifOpCode>(OpBuilder &builder) {
+ auto opLoc = currentOpLoc;
+ auto funcType = parseBlockFuncType(builder);
+ if (failed(funcType))
+ return failure();
+
+ LDBG() << "Parsing an if instruction of type " << *funcType;
+ auto inputTypes = funcType->getInputs();
+ auto conditionValue = popOperands(builder.getI32Type());
+ if (failed(conditionValue))
+ return failure();
+ auto inputOps = popOperands(inputTypes);
+ if (failed(inputOps))
+ return failure();
+
+ Block *curBlock = builder.getBlock();
+ Region *curRegion = curBlock->getParent();
+ auto resTypes = funcType->getResults();
+ llvm::SmallVector<Location> locations{};
+ locations.resize(resTypes.size(), *currentOpLoc);
+ auto *successor =
+ builder.createBlock(curRegion, curRegion->end(), resTypes, locations);
+ builder.setInsertionPointToEnd(curBlock);
+ auto ifOp = builder.create<IfOp>(*currentOpLoc, conditionValue->front(),
+ *inputOps, successor);
+ auto *ifEntryBlock = ifOp.createIfBlock();
+ constexpr auto ifElseFilter =
+ ByteSequence<WasmBinaryEncoding::endByte,
+ WasmBinaryEncoding::OpCode::elseOpCode>{};
+ auto parseIfRes = parseBlockContent(builder, ifEntryBlock, resTypes, *opLoc,
+ ifOp, ifElseFilter);
+ if (failed(parseIfRes))
+ return failure();
+ if (*parseIfRes == WasmBinaryEncoding::OpCode::elseOpCode) {
+ LDBG() << " else block is present.";
+ Block *elseEntryBlock = ifOp.createElseBlock();
+ auto parseElseRes =
+ parseBlockContent(builder, elseEntryBlock, resTypes, *opLoc, ifOp);
+ if (failed(parseElseRes))
+ return failure();
+ }
+ builder.setInsertionPointToStart(successor);
+ return {ValueRange{successor->getArguments()}};
+}
+
+template <>
+inline parsed_inst_t ExpressionParser::parseSpecificInstruction<
+ WasmBinaryEncoding::OpCode::branchIf>(OpBuilder &builder) {
+ auto level = parser.parseLiteral<uint32_t>();
+ if (failed(level))
+ return failure();
+ Block *curBlock = builder.getBlock();
+ Region *curRegion = curBlock->getParent();
+ auto sip = builder.saveInsertionPoint();
+ Block *elseBlock = builder.createBlock(curRegion, curRegion->end());
+ auto condition = popOperands(builder.getI32Type());
+ if (failed(condition))
+ return failure();
+ builder.restoreInsertionPoint(sip);
+ auto targetOp =
+ LabelBranchingOpInterface::getTargetOpFromBlock(curBlock, *level);
+ if (failed(targetOp))
+ return failure();
+ auto inputTypes = targetOp->getLabelTarget()->getArgumentTypes();
+ auto branchArgs = popOperands(inputTypes);
+ if (failed(branchArgs))
+ return failure();
+ builder.create<BranchIfOp>(*currentOpLoc, condition->front(),
+ builder.getUI32IntegerAttr(*level), *branchArgs,
+ elseBlock);
+ builder.setInsertionPointToStart(elseBlock);
+ return {*branchArgs};
+}
+
+template <>
+inline parsed_inst_t
+ExpressionParser::parseSpecificInstruction<WasmBinaryEncoding::OpCode::call>(
+ OpBuilder &builder) {
+ auto loc = *currentOpLoc;
+ auto funcIdx = parser.parseLiteral<uint32_t>();
+ if (failed(funcIdx))
+ return failure();
+ if (*funcIdx >= symbols.funcSymbols.size())
+ return emitError(loc, "Invalid function index: ") << *funcIdx;
+ auto callee = symbols.funcSymbols[*funcIdx];
+ llvm::ArrayRef<Type> inTypes = callee.functionType.getInputs();
+ llvm::ArrayRef<Type> resTypes = callee.functionType.getResults();
+ parsed_inst_t inOperands = popOperands(inTypes);
+ if (failed(inOperands))
+ return failure();
+ auto callOp =
+ builder.create<FuncCallOp>(loc, resTypes, callee.symbol, *inOperands);
+ return {callOp.getResults()};
+}
+
template <>
inline parsed_inst_t ExpressionParser::parseSpecificInstruction<
WasmBinaryEncoding::OpCode::localGet>(OpBuilder &builder) {
@@ -834,7 +1161,7 @@ parsed_inst_t ExpressionParser::parseSetOrTee(OpBuilder &builder) {
if (valueStack.empty())
return emitError(
*currentOpLoc,
- "invalid stack access, trying to access a value on an empty stack.");
+ "invalid stack access, trying to access a value on an empty stack");
parsed_inst_t poppedOp = popOperands(locals[*id].getType().getElementType());
if (failed(poppedOp))
@@ -1000,11 +1327,23 @@ inline parsed_inst_t ExpressionParser::buildNumericOp(
BUILD_NUMERIC_BINOP_FP(CopySignOp, copysign)
BUILD_NUMERIC_BINOP_FP(DivOp, div)
+BUILD_NUMERIC_BINOP_FP(GeOp, ge)
+BUILD_NUMERIC_BINOP_FP(GtOp, gt)
+BUILD_NUMERIC_BINOP_FP(LeOp, le)
+BUILD_NUMERIC_BINOP_FP(LtOp, lt)
BUILD_NUMERIC_BINOP_FP(MaxOp, max)
BUILD_NUMERIC_BINOP_FP(MinOp, min)
BUILD_NUMERIC_BINOP_INT(AndOp, and)
BUILD_NUMERIC_BINOP_INT(DivSIOp, divS)
BUILD_NUMERIC_BINOP_INT(DivUIOp, divU)
+BUILD_NUMERIC_BINOP_INT(GeSIOp, geS)
+BUILD_NUMERIC_BINOP_INT(GeUIOp, geU)
+BUILD_NUMERIC_BINOP_INT(GtSIOp, gtS)
+BUILD_NUMERIC_BINOP_INT(GtUIOp, gtU)
+BUILD_NUMERIC_BINOP_INT(LeSIOp, leS)
+BUILD_NUMERIC_BINOP_INT(LeUIOp, leU)
+BUILD_NUMERIC_BINOP_INT(LtSIOp, ltS)
+BUILD_NUMERIC_BINOP_INT(LtUIOp, ltU)
BUILD_NUMERIC_BINOP_INT(OrOp, or)
BUILD_NUMERIC_BINOP_INT(RemSIOp, remS)
BUILD_NUMERIC_BINOP_INT(RemUIOp, remU)
@@ -1015,7 +1354,9 @@ BUILD_NUMERIC_BINOP_INT(ShRSOp, shrS)
BUILD_NUMERIC_BINOP_INT(ShRUOp, shrU)
BUILD_NUMERIC_BINOP_INT(XOrOp, xor)
BUILD_NUMERIC_BINOP_INTFP(AddOp, add)
+BUILD_NUMERIC_BINOP_INTFP(EqOp, eq)
BUILD_NUMERIC_BINOP_INTFP(MulOp, mul)
+BUILD_NUMERIC_BINOP_INTFP(NeOp, ne)
BUILD_NUMERIC_BINOP_INTFP(SubOp, sub)
BUILD_NUMERIC_UNARY_OP_FP(AbsOp, abs)
BUILD_NUMERIC_UNARY_OP_FP(CeilOp, ceil)
@@ -1025,6 +1366,7 @@ BUILD_NUMERIC_UNARY_OP_FP(SqrtOp, sqrt)
BUILD_NUMERIC_UNARY_OP_FP(TruncOp, trunc)
BUILD_NUMERIC_UNARY_OP_INT(ClzOp, clz)
BUILD_NUMERIC_UNARY_OP_INT(CtzOp, ctz)
+BUILD_NUMERIC_UNARY_OP_INT(EqzOp, eqz)
BUILD_NUMERIC_UNARY_OP_INT(PopCntOp, popcnt)
// Don't need these anymore so let's undef them.
@@ -1036,6 +1378,105 @@ BUILD_NUMERIC_UNARY_OP_INT(PopCntOp, popcnt)
#undef BUILD_NUMERIC_OP
#undef BUILD_NUMERIC_CAST_OP
+template <typename opType, typename inputType, typename outputType,
+ typename... extraArgsT>
+inline parsed_inst_t ExpressionParser::buildConvertOp(OpBuilder &builder,
+ extraArgsT... extraArgs) {
+ static_assert(std::is_arithmetic_v<inputType>,
+ "InputType should be an arithmetic type");
+ static_assert(std::is_arithmetic_v<outputType>,
+ "OutputType should be an arithmetic type");
+ auto intype = buildLiteralType<inputType>(builder);
+ auto outType = buildLiteralType<outputType>(builder);
+ auto operand = popOperands(intype);
+ if (failed(operand))
+ return failure();
+ auto op = builder.create<opType>(*currentOpLoc, outType, operand->front(),
+ extraArgs...);
+ LDBG() << "Built operation: " << op;
+ return {{op.getResult()}};
+}
+
+template <>
+inline parsed_inst_t ExpressionParser::parseSpecificInstruction<
+ WasmBinaryEncoding::OpCode::demoteF64ToF32>(OpBuilder &builder) {
+ return buildConvertOp<DemoteOp, double, float>(builder);
+}
+
+template <>
+inline parsed_inst_t
+ExpressionParser::parseSpecificInstruction<WasmBinaryEncoding::OpCode::wrap>(
+ OpBuilder &builder) {
+ return buildConvertOp<WrapOp, int64_t, int32_t>(builder);
+}
+
+#define BUILD_CONVERSION_OP(IN_T, OUT_T, SOURCE_OP, TARGET_OP) \
+ template <> \
+ inline parsed_inst_t ExpressionParser::parseSpecificInstruction< \
+ WasmBinaryEncoding::OpCode::SOURCE_OP>(OpBuilder & builder) { \
+ return buildConvertOp<TARGET_OP, IN_T, OUT_T>(builder); \
+ }
+
+#define BUILD_CONVERT_OP_FOR(DEST_T, WIDTH) \
+ BUILD_CONVERSION_OP(uint32_t, DEST_T, convertUI32F##WIDTH, ConvertUOp) \
+ BUILD_CONVERSION_OP(int32_t, DEST_T, convertSI32F##WIDTH, ConvertSOp) \
+ BUILD_CONVERSION_OP(uint64_t, DEST_T, convertUI64F##WIDTH, ConvertUOp) \
+ BUILD_CONVERSION_OP(int64_t, DEST_T, convertSI64F##WIDTH, ConvertSOp)
+
+BUILD_CONVERT_OP_FOR(float, 32)
+BUILD_CONVERT_OP_FOR(double, 64)
+
+#undef BUILD_CONVERT_OP_FOR
+
+BUILD_CONVERSION_OP(int32_t, int64_t, extendS, ExtendSI32Op)
+BUILD_CONVERSION_OP(int32_t, int64_t, extendU, ExtendUI32Op)
+
+#undef BUILD_CONVERSION_OP
+
+#define BUILD_SLICE_EXTEND_PARSER(IT_WIDTH, EXTRACT_WIDTH) \
+ template <> \
+ parsed_inst_t ExpressionParser::parseSpecificInstruction< \
+ WasmBinaryEncoding::OpCode::extendI##IT_WIDTH##EXTRACT_WIDTH##S>( \
+ OpBuilder & builder) { \
+ using inout_t = int##IT_WIDTH##_t; \
+ auto attr = builder.getUI32IntegerAttr(EXTRACT_WIDTH); \
+ return buildConvertOp<ExtendLowBitsSOp, inout_t, inout_t>(builder, attr); \
+ }
+
+BUILD_SLICE_EXTEND_PARSER(32, 8)
+BUILD_SLICE_EXTEND_PARSER(32, 16)
+BUILD_SLICE_EXTEND_PARSER(64, 8)
+BUILD_SLICE_EXTEND_PARSER(64, 16)
+BUILD_SLICE_EXTEND_PARSER(64, 32)
+
+#undef BUILD_SLICE_EXTEND_PARSER
+
+template <>
+inline parsed_inst_t ExpressionParser::parseSpecificInstruction<
+ WasmBinaryEncoding::OpCode::promoteF32ToF64>(OpBuilder &builder) {
+ return buildConvertOp<PromoteOp, float, double>(builder);
+}
+
+#define BUILD_REINTERPRET_PARSER(WIDTH, FP_TYPE) \
+ template <> \
+ inline parsed_inst_t ExpressionParser::parseSpecificInstruction< \
+ WasmBinaryEncoding::OpCode::reinterpretF##WIDTH##AsI##WIDTH>(OpBuilder & \
+ builder) { \
+ return buildConvertOp<ReinterpretOp, FP_TYPE, int##WIDTH##_t>(builder); \
+ } \
+ \
+ template <> \
+ inline parsed_inst_t ExpressionParser::parseSpecificInstruction< \
+ WasmBinaryEncoding::OpCode::reinterpretI##WIDTH##AsF##WIDTH>(OpBuilder & \
+ builder) { \
+ return buildConvertOp<ReinterpretOp, int##WIDTH##_t, FP_TYPE>(builder); \
+ }
+
+BUILD_REINTERPRET_PARSER(32, float)
+BUILD_REINTERPRET_PARSER(64, double)
+
+#undef BUILD_REINTERPRET_PARSER
+
class WasmBinaryParser {
private:
struct SectionRegistry {
@@ -1153,7 +1594,7 @@ private:
if (tid.id >= symbols.moduleFuncTypes.size())
return emitError(loc, "invalid type id: ")
<< tid.id << ". Only " << symbols.moduleFuncTypes.size()
- << " type registration.";
+ << " type registrations";
FunctionType type = symbols.moduleFuncTypes[tid.id];
std::string symbol = symbols.getNewFuncSymbolName();
auto funcOp = FuncImportOp::create(builder, loc, symbol, moduleName,
@@ -1221,7 +1662,7 @@ public:
FileLineColLoc magicLoc = parser.getLocation();
FailureOr<StringRef> magic = parser.consumeNBytes(wasmHeader.size());
if (failed(magic) || magic->compare(wasmHeader)) {
- emitError(magicLoc, "source file does not contain valid Wasm header.");
+ emitError(magicLoc, "source file does not contain valid Wasm header");
return;
}
auto const expectedVersionString = StringRef{"\1\0\0\0", 4};
@@ -1391,7 +1832,7 @@ WasmBinaryParser::parseSectionItem<WasmSectionType::EXPORT>(ParserHead &ph,
return failure();
Operation *op = SymbolTable::lookupSymbolIn(mOp, *currentSymbol);
- SymbolTable::setSymbolVisibility(op, SymbolTable::Visibility::Public);
+ op->setAttr("exported", UnitAttr::get(op->getContext()));
StringAttr symName = SymbolTable::getSymbolName(op);
return SymbolTable{mOp}.rename(symName, *exportName);
}
diff --git a/mlir/python/mlir/dialects/transform/smt.py b/mlir/python/mlir/dialects/transform/smt.py
index 1f0b7f0..af88fff 100644
--- a/mlir/python/mlir/dialects/transform/smt.py
+++ b/mlir/python/mlir/dialects/transform/smt.py
@@ -19,6 +19,7 @@ except ImportError as e:
class ConstrainParamsOp(ConstrainParamsOp):
def __init__(
self,
+ results: Sequence[Type],
params: Sequence[transform.AnyParamType],
arg_types: Sequence[Type],
loc=None,
@@ -27,6 +28,7 @@ class ConstrainParamsOp(ConstrainParamsOp):
if len(params) != len(arg_types):
raise ValueError(f"{params=} not same length as {arg_types=}")
super().__init__(
+ results,
params,
loc=loc,
ip=ip,
@@ -36,3 +38,13 @@ class ConstrainParamsOp(ConstrainParamsOp):
@property
def body(self) -> Block:
return self.regions[0].blocks[0]
+
+
+def constrain_params(
+ results: Sequence[Type],
+ params: Sequence[transform.AnyParamType],
+ arg_types: Sequence[Type],
+ loc=None,
+ ip=None,
+):
+ return ConstrainParamsOp(results, params, arg_types, loc=loc, ip=ip)
diff --git a/mlir/test/Conversion/MathToROCDL/math-to-rocdl.mlir b/mlir/test/Conversion/MathToROCDL/math-to-rocdl.mlir
index dbff233..455f886 100644
--- a/mlir/test/Conversion/MathToROCDL/math-to-rocdl.mlir
+++ b/mlir/test/Conversion/MathToROCDL/math-to-rocdl.mlir
@@ -1,4 +1,5 @@
-// RUN: mlir-opt %s -convert-math-to-rocdl -allow-unregistered-dialect -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -allow-unregistered-dialect -split-input-file -pass-pipeline='builtin.module(convert-math-to-rocdl{chipset=gfx803})' | FileCheck %s --check-prefix=PRE9
+// RUN: mlir-opt %s -allow-unregistered-dialect -split-input-file -pass-pipeline='builtin.module(convert-math-to-rocdl{chipset=gfx942})' | FileCheck %s --check-prefix=POST9
module @test_module {
// CHECK: llvm.func @__ocml_fmod_f16(f16, f16) -> f16
@@ -596,3 +597,76 @@ module @test_module {
func.return %result : vector<2x2xf16>
}
}
+
+// -----
+
+// f16 clamp → rocdl.fmed3 on gfx9+
+// CHECK-LABEL: func.func @clampf_f16
+func.func @clampf_f16(%x: f16, %lo: f16, %hi: f16) -> f16 {
+ %r = math.clampf %x to [%lo, %hi] : f16
+ return %r : f16
+ // POST9: rocdl.fmed3 {{.*}} : f16
+ // PRE9-NOT: rocdl.fmed3
+ // PRE9: math.clampf {{.*}} : f16
+}
+
+// f32 clamp → rocdl.fmed3 on gfx9+
+// CHECK-LABEL: func.func @clampf_f32
+func.func @clampf_f32(%x: f32, %lo: f32, %hi: f32) -> f32 {
+ %r = math.clampf %x to [%lo, %hi] : f32
+ return %r : f32
+ // POST9: rocdl.fmed3 {{.*}} : f32
+ // PRE9-NOT: rocdl.fmed3
+ // PRE9: math.clampf {{.*}} : f32
+}
+
+// -----
+
+// Vector f16 clamp → rocdl.fmed3 on gfx9+
+// CHECK-LABEL: func.func @clampf_vector_f16
+func.func @clampf_vector_f16(%x: vector<2xf16>, %lo: vector<2xf16>, %hi: vector<2xf16>) -> vector<2xf16> {
+ %r = math.clampf %x to [%lo, %hi] : vector<2xf16>
+ return %r : vector<2xf16>
+ // POST9: rocdl.fmed3 {{.*}} : vector<2xf16>
+ // PRE9-NOT: rocdl.fmed3
+ // PRE9: math.clampf {{.*}} : vector<2xf16>
+}
+
+// -----
+
+// Vector f32 clamp → rocdl.fmed3 on gfx9+
+// CHECK-LABEL: func.func @clampf_vector_f32
+func.func @clampf_vector_f32(%x: vector<2xf32>, %lo: vector<2xf32>, %hi: vector<2xf32>) -> vector<2xf32> {
+ %r = math.clampf %x to [%lo, %hi] : vector<2xf32>
+ return %r : vector<2xf32>
+ // POST9: rocdl.fmed3 {{.*}} : vector<2xf32>
+ // PRE9-NOT: rocdl.fmed3
+ // PRE9: math.clampf {{.*}} : vector<2xf32>
+}
+
+// -----
+
+// Multi-dimensional vector f16 clamp → rocdl.fmed3 on gfx9+ (unrolled to 1D vectors)
+// CHECK-LABEL: func.func @clampf_vector_2d_f16
+func.func @clampf_vector_2d_f16(%x: vector<2x2xf16>, %lo: vector<2x2xf16>, %hi: vector<2x2xf16>) -> vector<2x2xf16> {
+ %r = math.clampf %x to [%lo, %hi] : vector<2x2xf16>
+ return %r : vector<2x2xf16>
+ // POST9: builtin.unrealized_conversion_cast {{.*}} : vector<2x2xf16> to !llvm.array<2 x vector<2xf16>>
+ // POST9: llvm.extractvalue {{.*}} : !llvm.array<2 x vector<2xf16>>
+ // POST9: rocdl.fmed3 {{.*}} : vector<2xf16>
+ // POST9: llvm.insertvalue {{.*}} : !llvm.array<2 x vector<2xf16>>
+ // POST9: llvm.extractvalue {{.*}} : !llvm.array<2 x vector<2xf16>>
+ // POST9: rocdl.fmed3 {{.*}} : vector<2xf16>
+ // POST9: llvm.insertvalue {{.*}} : !llvm.array<2 x vector<2xf16>>
+ // PRE9-NOT: rocdl.fmed3
+ // PRE9: math.clampf {{.*}} : vector<2x2xf16>
+}
+
+// -----
+// CHECK-LABEL: func.func @clampf_bf16
+func.func @clampf_bf16(%x: bf16, %lo: bf16, %hi: bf16) -> bf16 {
+ %r = math.clampf %x to [%lo, %hi] : bf16
+ return %r : bf16
+ // CHECK: math.clampf {{.*}} : bf16
+ // CHECK-NOT: rocdl.fmed3
+}
diff --git a/mlir/test/Conversion/MathToXeVM/math-to-xevm.mlir b/mlir/test/Conversion/MathToXeVM/math-to-xevm.mlir
index d76627b..c61640c 100644
--- a/mlir/test/Conversion/MathToXeVM/math-to-xevm.mlir
+++ b/mlir/test/Conversion/MathToXeVM/math-to-xevm.mlir
@@ -3,6 +3,15 @@
// RUN: mlir-opt %s -convert-math-to-xevm='convert-arith=false' \
// RUN: | FileCheck %s -check-prefixes='CHECK,CHECK-NO-ARITH'
+// RUN: mlir-opt --pass-pipeline="builtin.module(convert-math-to-xevm)" %s \
+// RUN: | FileCheck %s -check-prefixes='CHECK-MODULE,CHECK-ENTIRE-MODULE'
+// RUN: mlir-opt --pass-pipeline="builtin.module(gpu.module(convert-math-to-xevm))" %s \
+// RUN: | FileCheck %s -check-prefixes='CHECK-MODULE,CHECK-ONLY-GPU'
+
+// This test:
+// - checks that MathToXeVM converts fastmath math/arith ops properly;
+// - checks that MathToXeVM handles nested modules while respecting the pass manager.
+
module @test_module {
// CHECK-DAG: llvm.func @_Z22__spirv_ocl_native_expDh(f16) -> f16
// CHECK-DAG: llvm.func @_Z22__spirv_ocl_native_expf(f32) -> f32
@@ -152,4 +161,39 @@ module @test_module {
return
}
+
+ // Check that MathToXeVM handles nested modules while respecting the pass manager:
+
+ // CHECK-ENTIRE-MODULE: llvm.func @_Z22__spirv_ocl_native_expf(f32) -> f32
+ // CHECK-ONLY-GPU-NOT: llvm.func @_Z22__spirv_ocl_native_expf(f32) -> f32
+
+ // CHECK-MODULE-LABEL: @test_gpu
+ gpu.module @test_gpu {
+ // CHECK-MODULE: llvm.func @_Z22__spirv_ocl_native_expf(f32) -> f32
+ gpu.func @exp_gpu() {
+ %c1_f32 = arith.constant 1. : f32
+
+ // CHECK-MODULE: math.exp
+ %exp_normal_f32 = math.exp %c1_f32 : f32
+
+ // CHECK-MODULE: llvm.call @_Z22__spirv_ocl_native_expf(%{{.*}}) {fastmathFlags = #llvm.fastmath<afn>} : (f32) -> f32
+ %exp_fast_f32 = math.exp %c1_f32 fastmath<afn> : f32
+
+ gpu.return
+ }
+ }
+
+ // CHECK-MODULE-LABEL: @exp_func
+ func.func @exp_func() {
+ %c1_f32 = arith.constant 1. : f32
+
+ // CHECK-MODULE: math.exp
+ %exp_normal_f32 = math.exp %c1_f32 : f32
+
+ // CHECK-ENTIRE-MODULE: llvm.call @_Z22__spirv_ocl_native_expf(%{{.*}}) {fastmathFlags = #llvm.fastmath<afn>} : (f32) -> f32
+ // CHECK-ONLY-GPU: math.exp
+ %exp_fast_f32 = math.exp %c1_f32 fastmath<afn> : f32
+
+ return
+ }
}
diff --git a/mlir/test/Dialect/AMDGPU/invalid.mlir b/mlir/test/Dialect/AMDGPU/invalid.mlir
index 66e7dd4..a8256b1 100644
--- a/mlir/test/Dialect/AMDGPU/invalid.mlir
+++ b/mlir/test/Dialect/AMDGPU/invalid.mlir
@@ -238,3 +238,27 @@ func.func @gather_to_lds_non_lds(%idx1 : index, %mem1 : memref<32xf16>, %mem2 :
amdgpu.gather_to_lds %mem1[%idx1], %mem2[%idx1] : vector<2xf16>, memref<32xf16>, memref<32xf16, strided<[?]>, #gpu.address_space<workgroup>>
func.return
}
+
+// -----
+
+func.func @amdgpu.scaled_ext_packed816_invalid_block_size_and_first_scale_byte_16(%v: vector<8xf8E5M2>, %scale: vector<4xf8E8M0FNU>) {
+ // expected-error@+1 {{'amdgpu.scaled_ext_packed816' op blockSize of 16 can only have firstScaleByte be 0 or 1.}}
+ %ret0 = amdgpu.scaled_ext_packed816 %v scale(%scale) blockSize(16) firstScaleLane(0) firstScaleByte(2) : vector<8xf8E5M2>, vector<4xf8E8M0FNU> -> vector<8xf16>
+ func.return
+}
+
+// -----
+
+func.func @amdgpu.scaled_ext_packed816_invalid_block_size_and_first_scale_byte_32(%v: vector<8xf8E5M2>, %scale: vector<4xf8E8M0FNU>) {
+ // expected-error@+1 {{'amdgpu.scaled_ext_packed816' op blockSize of 32 can only have firstScaleByte be 0 or 2.}}
+ %ret0 = amdgpu.scaled_ext_packed816 %v scale(%scale) blockSize(32) firstScaleLane(0) firstScaleByte(1) : vector<8xf8E5M2>, vector<4xf8E8M0FNU> -> vector<8xf16>
+ func.return
+}
+
+// -----
+
+func.func @amdgpu.scaled_ext_packed816_invalid_input_output_sizes(%v: vector<8xf8E5M2>, %scale: vector<4xf8E8M0FNU>) {
+ // expected-error@+1 {{'amdgpu.scaled_ext_packed816' op failed to verify that all of {source, res} have same shape}}
+ %ret0 = amdgpu.scaled_ext_packed816 %v scale(%scale) blockSize(16) firstScaleLane(0) firstScaleByte(0) : vector<8xf8E5M2>, vector<4xf8E8M0FNU> -> vector<16xf16>
+ func.return
+}
diff --git a/mlir/test/Dialect/AMDGPU/ops.mlir b/mlir/test/Dialect/AMDGPU/ops.mlir
index 8f427e9..f9c6899 100644
--- a/mlir/test/Dialect/AMDGPU/ops.mlir
+++ b/mlir/test/Dialect/AMDGPU/ops.mlir
@@ -221,6 +221,61 @@ func.func @scaled_ext_scalar_f4e2m1_bf16(%v: vector<2xf4E2M1FN>, %scale: f32) ->
func.return %ret : vector<2xbf16>
}
+// CHECK-LABEL: func.func @scaled_ext_packed816_fp4
+func.func @scaled_ext_packed816_fp4(%v: vector<8xf4E2M1FN>, %scale: vector<4xf8E8M0FNU>) -> (vector<8xf16>, vector<8xbf16>, vector<8xf32>) {
+ // CHECK: amdgpu.scaled_ext_packed816
+ %ret0 = amdgpu.scaled_ext_packed816 %v scale(%scale) blockSize(32) firstScaleLane(0) firstScaleByte(0) : vector<8xf4E2M1FN>, vector<4xf8E8M0FNU> -> vector<8xf16>
+ // CHECK: amdgpu.scaled_ext_packed816
+ %ret1 = amdgpu.scaled_ext_packed816 %v scale(%scale) blockSize(32) firstScaleLane(0) firstScaleByte(0) : vector<8xf4E2M1FN>, vector<4xf8E8M0FNU> -> vector<8xbf16>
+ // CHECK: amdgpu.scaled_ext_packed816
+ %ret2 = amdgpu.scaled_ext_packed816 %v scale(%scale) blockSize(32) firstScaleLane(0) firstScaleByte(0) : vector<8xf4E2M1FN>, vector<4xf8E8M0FNU> -> vector<8xf32>
+ func.return %ret0, %ret1, %ret2 : vector<8xf16>, vector<8xbf16>, vector<8xf32>
+}
+
+// CHECK-LABEL: func.func @scaled_ext_packed816_fp8
+func.func @scaled_ext_packed816_fp8(%v: vector<8xf8E4M3FN>, %scale: vector<4xf8E8M0FNU>) -> (vector<8xf16>, vector<8xbf16>, vector<8xf32>) {
+ // CHECK: amdgpu.scaled_ext_packed816
+ %ret0 = amdgpu.scaled_ext_packed816 %v scale(%scale) blockSize(32) firstScaleLane(0) firstScaleByte(0) : vector<8xf8E4M3FN>, vector<4xf8E8M0FNU> -> vector<8xf16>
+ // CHECK: amdgpu.scaled_ext_packed816
+ %ret1 = amdgpu.scaled_ext_packed816 %v scale(%scale) blockSize(32) firstScaleLane(0) firstScaleByte(0) : vector<8xf8E4M3FN>, vector<4xf8E8M0FNU> -> vector<8xbf16>
+ // CHECK: amdgpu.scaled_ext_packed816
+ %ret2 = amdgpu.scaled_ext_packed816 %v scale(%scale) blockSize(32) firstScaleLane(0) firstScaleByte(0) : vector<8xf8E4M3FN>, vector<4xf8E8M0FNU> -> vector<8xf32>
+ func.return %ret0, %ret1, %ret2 : vector<8xf16>, vector<8xbf16>, vector<8xf32>
+}
+
+// CHECK-LABEL: func.func @scaled_ext_packed816_bf8
+func.func @scaled_ext_packed816_bf8(%v: vector<8xf8E5M2>, %scale: vector<4xf8E8M0FNU>) -> (vector<8xf16>, vector<8xbf16>, vector<8xf32>) {
+ // CHECK: amdgpu.scaled_ext_packed816
+ %ret0 = amdgpu.scaled_ext_packed816 %v scale(%scale) blockSize(32) firstScaleLane(0) firstScaleByte(0) : vector<8xf8E5M2>, vector<4xf8E8M0FNU> -> vector<8xf16>
+ // CHECK: amdgpu.scaled_ext_packed816
+ %ret1 = amdgpu.scaled_ext_packed816 %v scale(%scale) blockSize(32) firstScaleLane(0) firstScaleByte(0) : vector<8xf8E5M2>, vector<4xf8E8M0FNU> -> vector<8xbf16>
+ // CHECK: amdgpu.scaled_ext_packed816
+ %ret2 = amdgpu.scaled_ext_packed816 %v scale(%scale) blockSize(32) firstScaleLane(0) firstScaleByte(0) : vector<8xf8E5M2>, vector<4xf8E8M0FNU> -> vector<8xf32>
+ func.return %ret0, %ret1, %ret2 : vector<8xf16>, vector<8xbf16>, vector<8xf32>
+}
+
+// CHECK-LABEL: func.func @scaled_ext_packed816_fp6
+func.func @scaled_ext_packed816_fp6(%v: vector<16xf6E2M3FN>, %scale: vector<4xf8E8M0FNU>) -> (vector<16xf16>, vector<16xbf16>, vector<16xf32>) {
+ // CHECK: amdgpu.scaled_ext_packed816
+ %ret0 = amdgpu.scaled_ext_packed816 %v scale(%scale) blockSize(32) firstScaleLane(0) firstScaleByte(0) : vector<16xf6E2M3FN>, vector<4xf8E8M0FNU> -> vector<16xf16>
+ // CHECK: amdgpu.scaled_ext_packed816
+ %ret1 = amdgpu.scaled_ext_packed816 %v scale(%scale) blockSize(32) firstScaleLane(0) firstScaleByte(0) : vector<16xf6E2M3FN>, vector<4xf8E8M0FNU> -> vector<16xbf16>
+ // CHECK: amdgpu.scaled_ext_packed816
+ %ret2 = amdgpu.scaled_ext_packed816 %v scale(%scale) blockSize(32) firstScaleLane(0) firstScaleByte(0) : vector<16xf6E2M3FN>, vector<4xf8E8M0FNU> -> vector<16xf32>
+ func.return %ret0, %ret1, %ret2 : vector<16xf16>, vector<16xbf16>, vector<16xf32>
+}
+
+// CHECK-LABEL: func.func @scaled_ext_packed816_bf6
+func.func @scaled_ext_packed816_bf6(%v: vector<16xf6E3M2FN>, %scale: vector<4xf8E8M0FNU>) -> (vector<16xf16>, vector<16xbf16>, vector<16xf32>) {
+ // CHECK: amdgpu.scaled_ext_packed816
+ %ret0 = amdgpu.scaled_ext_packed816 %v scale(%scale) blockSize(32) firstScaleLane(0) firstScaleByte(0) : vector<16xf6E3M2FN>, vector<4xf8E8M0FNU> -> vector<16xf16>
+ // CHECK: amdgpu.scaled_ext_packed816
+ %ret1 = amdgpu.scaled_ext_packed816 %v scale(%scale) blockSize(32) firstScaleLane(0) firstScaleByte(0) : vector<16xf6E3M2FN>, vector<4xf8E8M0FNU> -> vector<16xbf16>
+ // CHECK: amdgpu.scaled_ext_packed816
+ %ret2 = amdgpu.scaled_ext_packed816 %v scale(%scale) blockSize(32) firstScaleLane(0) firstScaleByte(0) : vector<16xf6E3M2FN>, vector<4xf8E8M0FNU> -> vector<16xf32>
+ func.return %ret0, %ret1, %ret2 : vector<16xf16>, vector<16xbf16>, vector<16xf32>
+}
+
// CHECK-LABEL: func.func @packed_scaled_trunc_f8e4m3_f32
// CHECK: amdgpu.packed_scaled_trunc
func.func @packed_scaled_trunc_f8e4m3_f32(%v: vector<2xf32>, %scale: f32) -> vector<4xf8E4M3FN> {
diff --git a/mlir/test/Dialect/LLVMIR/rocdl.mlir b/mlir/test/Dialect/LLVMIR/rocdl.mlir
index 242c04f..d270ee8 100644
--- a/mlir/test/Dialect/LLVMIR/rocdl.mlir
+++ b/mlir/test/Dialect/LLVMIR/rocdl.mlir
@@ -1211,6 +1211,57 @@ llvm.func @rocdl.cvt.scale.pk16(%v3xi32: vector<3xi32>, %scale:i32) {
// -----
+// CHECK-LABEL: rocdl.cvt.scalef32.pk16
+llvm.func @rocdl.cvt.scalef32.pk16(%v16xf32: vector<16xf32>,
+ %v16xf16: vector<16xf16>,
+ %v16xbf16: vector<16xbf16>,
+ %scale: f32) {
+
+ // CHECK: rocdl.cvt.scalef32.pk16.fp6.f16
+ %0 = rocdl.cvt.scalef32.pk16.fp6.f16 %v16xf16, %scale : vector<3xi32>
+ // CHECK: rocdl.cvt.scalef32.pk16.fp6.bf16
+ %1 = rocdl.cvt.scalef32.pk16.fp6.bf16 %v16xbf16, %scale : vector<3xi32>
+ // CHECK: rocdl.cvt.scalef32.pk16.fp6.f32
+ %2 = rocdl.cvt.scalef32.pk16.fp6.f32 %v16xf32, %scale : vector<3xi32>
+
+ // CHECK: rocdl.cvt.scalef32.pk16.bf6.f16
+ %3 = rocdl.cvt.scalef32.pk16.bf6.f16 %v16xf16, %scale : vector<3xi32>
+ // CHECK: rocdl.cvt.scalef32.pk16.bf6.bf16
+ %4 = rocdl.cvt.scalef32.pk16.bf6.bf16 %v16xbf16, %scale : vector<3xi32>
+ // CHECK: rocdl.cvt.scalef32.pk16.bf6.f32
+ %5 = rocdl.cvt.scalef32.pk16.bf6.f32 %v16xf32, %scale : vector<3xi32>
+
+ llvm.return
+}
+
+// -----
+
+// CHECK-LABEL: rocdl.cvt.scalef32.sr.pk16
+llvm.func @rocdl.cvt.scalef32.sr.pk16(%v16xf32: vector<16xf32>,
+ %v16xf16: vector<16xf16>,
+ %v16xbf16: vector<16xbf16>,
+ %seed: i32,
+ %scale: f32) {
+
+ // CHECK: rocdl.cvt.scalef32.sr.pk16.fp6.f16
+ %0 = rocdl.cvt.scalef32.sr.pk16.fp6.f16 %v16xf16, %seed, %scale : vector<3xi32>
+ // CHECK: rocdl.cvt.scalef32.sr.pk16.fp6.bf16
+ %1 = rocdl.cvt.scalef32.sr.pk16.fp6.bf16 %v16xbf16, %seed, %scale : vector<3xi32>
+ // CHECK: rocdl.cvt.scalef32.sr.pk16.fp6.f32
+ %2 = rocdl.cvt.scalef32.sr.pk16.fp6.f32 %v16xf32, %seed, %scale : vector<3xi32>
+
+ // CHECK: rocdl.cvt.scalef32.sr.pk16.bf6.f16
+ %3 = rocdl.cvt.scalef32.sr.pk16.bf6.f16 %v16xf16, %seed, %scale : vector<3xi32>
+ // CHECK: rocdl.cvt.scalef32.sr.pk16.bf6.bf16
+ %4 = rocdl.cvt.scalef32.sr.pk16.bf6.bf16 %v16xbf16, %seed, %scale : vector<3xi32>
+ // CHECK: rocdl.cvt.scalef32.sr.pk16.bf6.f32
+ %5 = rocdl.cvt.scalef32.sr.pk16.bf6.f32 %v16xf32, %seed, %scale : vector<3xi32>
+
+ llvm.return
+}
+
+// -----
+
// expected-error@below {{attribute attached to unexpected op}}
func.func private @expected_llvm_func() attributes { rocdl.kernel }
diff --git a/mlir/test/Dialect/MemRef/canonicalize.mlir b/mlir/test/Dialect/MemRef/canonicalize.mlir
index 16b7a5c..7160b52 100644
--- a/mlir/test/Dialect/MemRef/canonicalize.mlir
+++ b/mlir/test/Dialect/MemRef/canonicalize.mlir
@@ -911,6 +911,21 @@ func.func @reinterpret_noop(%arg : memref<2x3x4xf32>) -> memref<2x3x4xf32> {
// -----
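+// Constant offset/sizes/strides operands are folded into static values; a
+// memref.cast then restores the original dynamic result type.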
+// CHECK-LABEL: func @reinterpret_constant_fold
+// CHECK-SAME: (%[[ARG:.*]]: memref<f32>)
+// CHECK: %[[RES:.*]] = memref.reinterpret_cast %[[ARG]] to offset: [0], sizes: [100, 100], strides: [100, 1]
+// CHECK: %[[CAST:.*]] = memref.cast %[[RES]]
+// CHECK: return %[[CAST]]
+func.func @reinterpret_constant_fold(%arg0: memref<f32>) -> memref<?x?xf32, strided<[?, ?], offset: ?>> {
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %c100 = arith.constant 100 : index
+ %reinterpret_cast = memref.reinterpret_cast %arg0 to offset: [%c0], sizes: [%c100, %c100], strides: [%c100, %c1] : memref<f32> to memref<?x?xf32, strided<[?, ?], offset: ?>>
+ return %reinterpret_cast : memref<?x?xf32, strided<[?, ?], offset: ?>>
+}
+
+// -----
+
// CHECK-LABEL: func @reinterpret_of_reinterpret
// CHECK-SAME: (%[[ARG:.*]]: memref<?xi8>, %[[SIZE1:.*]]: index, %[[SIZE2:.*]]: index)
// CHECK: %[[RES:.*]] = memref.reinterpret_cast %[[ARG]] to offset: [0], sizes: [%[[SIZE2]]], strides: [1]
@@ -996,10 +1011,9 @@ func.func @reinterpret_of_extract_strided_metadata_same_type(%arg0 : memref<?x?x
// when the strides don't match.
// CHECK-LABEL: func @reinterpret_of_extract_strided_metadata_w_different_stride
// CHECK-SAME: (%[[ARG:.*]]: memref<8x2xf32>)
-// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
-// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
-// CHECK: %[[RES:.*]] = memref.reinterpret_cast %[[ARG]] to offset: [%[[C0]]], sizes: [4, 2, 2], strides: [1, 1, %[[C1]]]
-// CHECK: return %[[RES]]
+// CHECK: %[[RES:.*]] = memref.reinterpret_cast %[[ARG]] to offset: [0], sizes: [4, 2, 2], strides: [1, 1, 1]
+// CHECK: %[[CAST:.*]] = memref.cast %[[RES]]
+// CHECK: return %[[CAST]]
func.func @reinterpret_of_extract_strided_metadata_w_different_stride(%arg0 : memref<8x2xf32>) -> memref<?x?x?xf32, strided<[?, ?, ?], offset: ?>> {
%base, %offset, %sizes:2, %strides:2 = memref.extract_strided_metadata %arg0 : memref<8x2xf32> -> memref<f32>, index, index, index, index, index
%m2 = memref.reinterpret_cast %base to offset: [%offset], sizes: [4, 2, 2], strides: [1, 1, %strides#1] : memref<f32> to memref<?x?x?xf32, strided<[?, ?, ?], offset: ?>>
@@ -1011,11 +1025,9 @@ func.func @reinterpret_of_extract_strided_metadata_w_different_stride(%arg0 : me
// when the offset doesn't match.
// CHECK-LABEL: func @reinterpret_of_extract_strided_metadata_w_different_offset
// CHECK-SAME: (%[[ARG:.*]]: memref<8x2xf32>)
-// CHECK-DAG: %[[C8:.*]] = arith.constant 8 : index
-// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
-// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
-// CHECK: %[[RES:.*]] = memref.reinterpret_cast %[[ARG]] to offset: [1], sizes: [%[[C8]], %[[C2]]], strides: [%[[C2]], %[[C1]]]
-// CHECK: return %[[RES]]
+// CHECK: %[[RES:.*]] = memref.reinterpret_cast %[[ARG]] to offset: [1], sizes: [8, 2], strides: [2, 1]
+// CHECK: %[[CAST:.*]] = memref.cast %[[RES]]
+// CHECK: return %[[CAST]]
func.func @reinterpret_of_extract_strided_metadata_w_different_offset(%arg0 : memref<8x2xf32>) -> memref<?x?xf32, strided<[?, ?], offset: ?>> {
%base, %offset, %sizes:2, %strides:2 = memref.extract_strided_metadata %arg0 : memref<8x2xf32> -> memref<f32>, index, index, index, index, index
%m2 = memref.reinterpret_cast %base to offset: [1], sizes: [%sizes#0, %sizes#1], strides: [%strides#0, %strides#1] : memref<f32> to memref<?x?xf32, strided<[?, ?], offset: ?>>
diff --git a/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir b/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
index b6c72be..f66cf7a 100644
--- a/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
@@ -490,3 +490,32 @@ func.func @collapse_shape_regression(
tensor.collapse_shape %0[[0, 1]] : tensor<5x6xf32> into tensor<30xf32>
return
}
+
+// -----
+
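+// The callee returns the same (cast) tensor on both paths; after bufferization
+// the equivalent memref result is dropped and only the index is returned, so
+// the caller reuses its own buffer.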
+// CHECK-LABEL: func private @mult_return_callee(
+// CHECK-SAME: %[[T:.*]]: memref<?xf32, strided<[?], offset: ?>>, %[[COND:.*]]: i1,
+// CHECK-SAME: %[[A:.*]]: index, %[[B:.*]]: index) -> index {
+// CHECK: cf.cond_br %[[COND]], ^bb1, ^bb2
+// CHECK: ^bb1:
+// CHECK: return %[[A]] : index
+// CHECK: ^bb2:
+// CHECK: return %[[B]] : index
+func.func private @mult_return_callee(%t: tensor<?xf32>, %cond:i1, %a: index, %b: index) -> (tensor<10xf32>, index) {
+ %casted = tensor.cast %t : tensor<?xf32> to tensor<10xf32>
+ cf.cond_br %cond,^a, ^b
+^a:
+ return %casted, %a : tensor<10xf32>, index
+^b:
+ return %casted, %b : tensor<10xf32>, index
+}
+
+// CHECK-LABEL: func @mult_return(
+// CHECK-SAME: %[[T:.*]]: memref<?xf32, strided<[?], offset: ?>>, %[[COND:.*]]: i1,
+// CHECK-SAME: %[[A:.*]]: index, %[[B:.*]]: index) -> (memref<?xf32, strided<[?], offset: ?>>, index) {
+func.func @mult_return(%t: tensor<?xf32>, %cond:i1, %a: index, %b: index) -> (tensor<10xf32>, index) {
+ // CHECK: %[[RET:.*]] = call @mult_return_callee(%[[T]], %[[COND]], %[[A]], %[[B]]) : (memref<?xf32, strided<[?], offset: ?>>, i1, index, index) -> index
+ // CHECK: return %[[T]], %[[RET]] : memref<?xf32, strided<[?], offset: ?>>, index
+ %t_res, %v = func.call @mult_return_callee(%t, %cond, %a, %b) : (tensor<?xf32>, i1, index, index) -> (tensor<10xf32>, index)
+ return %t_res, %v : tensor<10xf32>, index
+}
diff --git a/mlir/test/Dialect/Transform/test-smt-extension-invalid.mlir b/mlir/test/Dialect/Transform/test-smt-extension-invalid.mlir
index 314b8d4..d91d69a 100644
--- a/mlir/test/Dialect/Transform/test-smt-extension-invalid.mlir
+++ b/mlir/test/Dialect/Transform/test-smt-extension-invalid.mlir
@@ -1,11 +1,40 @@
// RUN: mlir-opt %s --transform-interpreter --split-input-file --verify-diagnostics
+// CHECK-LABEL: @incorrect_terminator
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @incorrect_terminator(%arg0: !transform.any_op {transform.readonly}) {
+ %param_as_param = transform.param.constant 42 -> !transform.param<i64>
+ // expected-error@below {{op expected 'smt.yield' as terminator}}
+ transform.smt.constrain_params(%param_as_param) : (!transform.param<i64>) -> () {
+ ^bb0(%param_as_smt_var: !smt.int):
+ transform.yield
+ }
+ transform.yield
+ }
+}
+
+// -----
+
+// CHECK-LABEL: @operands_not_one_to_one_with_vars
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @operands_not_one_to_one_with_vars(%arg0: !transform.any_op {transform.readonly}) {
+ %param_as_param = transform.param.constant 42 -> !transform.param<i64>
+ // expected-error@below {{must have the same number of block arguments as operands}}
+ transform.smt.constrain_params(%param_as_param) : (!transform.param<i64>) -> () {
+ ^bb0(%param_as_smt_var: !smt.int, %param_as_another_smt_var: !smt.int):
+ }
+ transform.yield
+ }
+}
+
+// -----
+
// CHECK-LABEL: @constraint_not_using_smt_ops
module attributes {transform.with_named_sequence} {
transform.named_sequence @constraint_not_using_smt_ops(%arg0: !transform.any_op {transform.readonly}) {
%param_as_param = transform.param.constant 42 -> !transform.param<i64>
// expected-error@below {{ops contained in region should belong to SMT-dialect}}
- transform.smt.constrain_params(%param_as_param) : !transform.param<i64> {
+ transform.smt.constrain_params(%param_as_param) : (!transform.param<i64>) -> () {
^bb0(%param_as_smt_var: !smt.int):
%c4 = arith.constant 4 : i32
// This is the kind of thing one might think works:
@@ -17,13 +46,90 @@ module attributes {transform.with_named_sequence} {
// -----
-// CHECK-LABEL: @operands_not_one_to_one_with_vars
+// CHECK-LABEL: @results_not_one_to_one_with_vars
module attributes {transform.with_named_sequence} {
- transform.named_sequence @operands_not_one_to_one_with_vars(%arg0: !transform.any_op {transform.readonly}) {
+ transform.named_sequence @results_not_one_to_one_with_vars(%arg0: !transform.any_op {transform.readonly}) {
%param_as_param = transform.param.constant 42 -> !transform.param<i64>
- // expected-error@below {{must have the same number of block arguments as operands}}
- transform.smt.constrain_params(%param_as_param) : !transform.param<i64> {
+ transform.smt.constrain_params(%param_as_param, %param_as_param) : (!transform.param<i64>, !transform.param<i64>) -> () {
^bb0(%param_as_smt_var: !smt.int, %param_as_another_smt_var: !smt.int):
+ // expected-error@below {{expected terminator to have as many operands as the parent op has results}}
+ smt.yield %param_as_smt_var : !smt.int
+ }
+ transform.yield
+ }
+}
+
+// -----
+
+// CHECK-LABEL: @non_smt_type_block_args
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @non_smt_type_block_args(%arg0: !transform.any_op {transform.readonly}) {
+ %param_as_param = transform.param.constant 42 -> !transform.param<i8>
+ // expected-error@below {{the type of block arg #0 is expected to be either a !smt.bool, a !smt.int, or a !smt.bv}}
+ transform.smt.constrain_params(%param_as_param) : (!transform.param<i8>) -> (!transform.param<i8>) {
+ ^bb0(%param_as_smt_var: !transform.param<i8>):
+ smt.yield %param_as_smt_var : !transform.param<i8>
+ }
+ transform.yield
+ }
+}
+
+
+// -----
+
+// CHECK-LABEL: @mismatched_arg_type_bool
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @mismatched_arg_type_bool(%arg0: !transform.any_op {transform.readonly}) {
+ %param_as_param = transform.param.constant 42 -> !transform.param<i64>
+ // expected-error@below {{the type of block arg #0 is !smt.bool though the corresponding operand type ('!transform.param<i64>') is not wrapping i1}}
+ transform.smt.constrain_params(%param_as_param) : (!transform.param<i64>) -> (!transform.param<i64>) {
+ ^bb0(%param_as_smt_var: !smt.bool):
+ smt.yield %param_as_smt_var : !smt.bool
+ }
+ transform.yield
+ }
+}
+
+// -----
+
+// CHECK-LABEL: @mismatched_arg_type_bitvector
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @mismatched_arg_type_bitvector(%arg0: !transform.any_op {transform.readonly}) {
+ %param_as_param = transform.param.constant 42 -> !transform.param<i64>
+ // expected-error@below {{the type of block arg #0 is '!smt.bv<8>' though the corresponding operand type ('!transform.param<i64>') is not wrapping an integer type of the same bitwidth}}
+ transform.smt.constrain_params(%param_as_param) : (!transform.param<i64>) -> (!transform.param<i64>) {
+ ^bb0(%param_as_smt_var: !smt.bv<8>):
+ smt.yield %param_as_smt_var : !smt.bv<8>
+ }
+ transform.yield
+ }
+}
+
+// -----
+
+// CHECK-LABEL: @mismatched_result_type_bool
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @mismatched_result_type_bool(%arg0: !transform.any_op {transform.readonly}) {
+ %param_as_param = transform.param.constant 1 -> !transform.param<i1>
+ transform.smt.constrain_params(%param_as_param) : (!transform.param<i1>) -> (!transform.param<i64>) {
+ ^bb0(%param_as_smt_var: !smt.bool):
+ // expected-error@below {{the type of terminator operand #0 is !smt.bool though the corresponding result type ('!transform.param<i64>') is not wrapping i1}}
+ smt.yield %param_as_smt_var : !smt.bool
+ }
+ transform.yield
+ }
+}
+
+// -----
+
+// CHECK-LABEL: @mismatched_result_type_bitvector
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @mismatched_result_type_bitvector(%arg0: !transform.any_op {transform.readonly}) {
+ %param_as_param = transform.param.constant 42 -> !transform.param<i8>
+ transform.smt.constrain_params(%param_as_param) : (!transform.param<i8>) -> (!transform.param<i64>) {
+ ^bb0(%param_as_smt_var: !smt.bv<8>):
+ // expected-error@below {{the type of terminator operand #0 is '!smt.bv<8>' though the corresponding result type ('!transform.param<i64>') is not wrapping an integer type of the same bitwidth}}
+ smt.yield %param_as_smt_var : !smt.bv<8>
}
transform.yield
}
diff --git a/mlir/test/Dialect/Transform/test-smt-extension.mlir b/mlir/test/Dialect/Transform/test-smt-extension.mlir
index 29d1517..6cc41dd 100644
--- a/mlir/test/Dialect/Transform/test-smt-extension.mlir
+++ b/mlir/test/Dialect/Transform/test-smt-extension.mlir
@@ -7,7 +7,7 @@ module attributes {transform.with_named_sequence} {
%param_as_param = transform.param.constant 42 -> !transform.param<i64>
// CHECK: transform.smt.constrain_params(%[[PARAM_AS_PARAM]])
- transform.smt.constrain_params(%param_as_param) : !transform.param<i64> {
+ transform.smt.constrain_params(%param_as_param) : (!transform.param<i64>) -> () {
// CHECK: ^bb{{.*}}(%[[PARAM_AS_SMT_SYMB:.*]]: !smt.int):
^bb0(%param_as_smt_var: !smt.int):
// CHECK: %[[C0:.*]] = smt.int.constant 0
@@ -31,18 +31,20 @@ module attributes {transform.with_named_sequence} {
// -----
-// CHECK-LABEL: @schedule_with_constraint_on_multiple_params
+// CHECK-LABEL: @schedule_with_constraint_on_multiple_params_returning_computed_value
module attributes {transform.with_named_sequence} {
- transform.named_sequence @schedule_with_constraint_on_multiple_params(%arg0: !transform.any_op {transform.readonly}) {
+ transform.named_sequence @schedule_with_constraint_on_multiple_params_returning_computed_value(%arg0: !transform.any_op {transform.readonly}) {
// CHECK: %[[PARAM_A:.*]] = transform.param.constant
%param_a = transform.param.constant 4 -> !transform.param<i64>
// CHECK: %[[PARAM_B:.*]] = transform.param.constant
- %param_b = transform.param.constant 16 -> !transform.param<i64>
+ %param_b = transform.param.constant 32 -> !transform.param<i64>
// CHECK: transform.smt.constrain_params(%[[PARAM_A]], %[[PARAM_B]])
- transform.smt.constrain_params(%param_a, %param_b) : !transform.param<i64>, !transform.param<i64> {
+ %divisor = transform.smt.constrain_params(%param_a, %param_b) : (!transform.param<i64>, !transform.param<i64>) -> (!transform.param<i64>) {
// CHECK: ^bb{{.*}}(%[[VAR_A:.*]]: !smt.int, %[[VAR_B:.*]]: !smt.int):
^bb0(%var_a: !smt.int, %var_b: !smt.int):
+ // CHECK: %[[DIV:.*]] = smt.int.div %[[VAR_B]], %[[VAR_A]]
+ %divisor = smt.int.div %var_b, %var_a
// CHECK: %[[C0:.*]] = smt.int.constant 0
%c0 = smt.int.constant 0
// CHECK: %[[REMAINDER:.*]] = smt.int.mod %[[VAR_B]], %[[VAR_A]]
@@ -51,8 +53,11 @@ module attributes {transform.with_named_sequence} {
%eq = smt.eq %remainder, %c0 : !smt.int
// CHECK: smt.assert %[[EQ]]
smt.assert %eq
+ // CHECK: smt.yield %[[DIV]]
+ smt.yield %divisor : !smt.int
}
- // NB: from here can rely on that %param_a is a divisor of %param_b
+ // NB: from here on we can rely on %param_a being a divisor of %param_b and
+ // on the relevant factor, 8, being bound to %divisor.
transform.yield
}
}
@@ -63,10 +68,10 @@ module attributes {transform.with_named_sequence} {
module attributes {transform.with_named_sequence} {
transform.named_sequence @schedule_with_param_as_a_bool(%arg0: !transform.any_op {transform.readonly}) {
// CHECK: %[[PARAM_AS_PARAM:.*]] = transform.param.constant
- %param_as_param = transform.param.constant true -> !transform.any_param
+ %param_as_param = transform.param.constant true -> !transform.param<i1>
// CHECK: transform.smt.constrain_params(%[[PARAM_AS_PARAM]])
- transform.smt.constrain_params(%param_as_param) : !transform.any_param {
+ transform.smt.constrain_params(%param_as_param) : (!transform.param<i1>) -> () {
// CHECK: ^bb{{.*}}(%[[PARAM_AS_SMT_VAR:.*]]: !smt.bool):
^bb0(%param_as_smt_var: !smt.bool):
// CHECK: %[[C0:.*]] = smt.int.constant 0
diff --git a/mlir/test/Dialect/Vector/linearize.mlir b/mlir/test/Dialect/Vector/linearize.mlir
index ee5cfbcd..cbbc833 100644
--- a/mlir/test/Dialect/Vector/linearize.mlir
+++ b/mlir/test/Dialect/Vector/linearize.mlir
@@ -428,6 +428,47 @@ func.func @test_linearize_across_for(%arg0 : vector<4xi8>) -> vector<4xi8> {
// -----
+// CHECK-LABEL: linearize_vector_broadcast_scalar_source
+// CHECK-SAME: (%[[ARG:.*]]: i32) -> vector<4x2xi32>
+func.func @linearize_vector_broadcast_scalar_source(%arg0: i32) -> vector<4x2xi32> {
+
+ // CHECK: %[[BROADCAST:.*]] = vector.broadcast %[[ARG]] : i32 to vector<8xi32>
+ // CHECK: %[[CAST:.*]] = vector.shape_cast %[[BROADCAST]] : vector<8xi32> to vector<4x2xi32>
+ // CHECK: return %[[CAST]] : vector<4x2xi32>
+ %0 = vector.broadcast %arg0 : i32 to vector<4x2xi32>
+ return %0 : vector<4x2xi32>
+}
+
+// -----
+
+// CHECK-LABEL: linearize_vector_broadcast_rank_two_source
+// CHECK-SAME: (%[[ARG:.*]]: vector<1x1xi32>) -> vector<4x2xi32>
+func.func @linearize_vector_broadcast_rank_two_source(%arg0: vector<1x1xi32>) -> vector<4x2xi32> {
+
+ // CHECK: %[[CAST0:.*]] = vector.shape_cast %[[ARG]] : vector<1x1xi32> to vector<1xi32>
+ // CHECK: %[[BROADCAST:.*]] = vector.broadcast %[[CAST0]] : vector<1xi32> to vector<8xi32>
+ // CHECK: %[[CAST1:.*]] = vector.shape_cast %[[BROADCAST]] : vector<8xi32> to vector<4x2xi32>
+ // CHECK: return %[[CAST1]] : vector<4x2xi32>
+ %0 = vector.broadcast %arg0 : vector<1x1xi32> to vector<4x2xi32>
+ return %0 : vector<4x2xi32>
+}
+
+// -----
+
+// CHECK-LABEL: linearize_scalable_vector_broadcast
+// CHECK-SAME: (%[[ARG:.*]]: i32) -> vector<4x[2]xi32>
+func.func @linearize_scalable_vector_broadcast(%arg0: i32) -> vector<4x[2]xi32> {
+
+ // CHECK: %[[BROADCAST:.*]] = vector.broadcast %[[ARG]] : i32 to vector<[8]xi32>
+ // CHECK: %[[CAST:.*]] = vector.shape_cast %[[BROADCAST]] : vector<[8]xi32> to vector<4x[2]xi32>
+ // CHECK: return %[[CAST]] : vector<4x[2]xi32>
+ %0 = vector.broadcast %arg0 : i32 to vector<4x[2]xi32>
+ return %0 : vector<4x[2]xi32>
+
+}
+
+// -----
+
// CHECK-LABEL: linearize_create_mask
// CHECK-SAME: (%[[ARG0:.*]]: index, %[[ARG1:.*]]: index) -> vector<1x16xi1>
func.func @linearize_create_mask(%arg0 : index, %arg1 : index) -> vector<1x16xi1> {
diff --git a/mlir/test/Dialect/Vector/vector-warp-distribute.mlir b/mlir/test/Dialect/Vector/vector-warp-distribute.mlir
index 401cdd29..0cf6dd1 100644
--- a/mlir/test/Dialect/Vector/vector-warp-distribute.mlir
+++ b/mlir/test/Dialect/Vector/vector-warp-distribute.mlir
@@ -474,6 +474,41 @@ func.func @warp_scf_for_use_from_above(%arg0: index) {
}
// -----
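+// A loop bound passed into the warp op via `args` remains usable after
+// distribution: the scf.for is hoisted out of gpu.warp_execute_on_lane_0 and
+// uses the original index value directly.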
+// CHECK-PROP-LABEL: func.func @warp_scf_for_local_loop_bounds
+// CHECK-PROP: (%{{.*}}: index, %[[ARG1:[a-zA-Z0-9]+]]: index) {
+// CHECK-PROP: %[[W:.*]] = gpu.warp_execute_on_lane_0(%{{.*}})[32] args(%[[ARG1]] : index) -> (vector<4xf32>) {
+// CHECK-PROP: ^bb0(%{{.*}}: index):
+// CHECK-PROP: %[[T2:.*]] = "some_def"() : () -> vector<128xf32>
+// CHECK-PROP: gpu.yield %[[T2]] : vector<128xf32>
+// CHECK-PROP: }
+// CHECK-PROP: %[[FOR:.*]] = scf.for %{{.*}} to %[[ARG1]] step %{{.*}} iter_args(%{{.*}}) -> (vector<4xf32>) {
+// CHECK-PROP: %[[W2:.*]] = gpu.warp_execute_on_lane_0(%{{.*}})[32]
+// CHECK-PROP-SAME: args(%{{.*}} : vector<4xf32>) -> (vector<4xf32>) {
+// CHECK-PROP: ^bb0(%{{.*}}: vector<128xf32>):
+// CHECK-PROP: gpu.yield %{{.*}} : vector<128xf32>
+// CHECK-PROP: }
+// CHECK-PROP: scf.yield %[[W2]] : vector<4xf32>
+// CHECK-PROP: }
+// CHECK-PROP: "some_use"(%[[FOR]]) : (vector<4xf32>) -> ()
+// CHECK-PROP: return
+func.func @warp_scf_for_local_loop_bounds(%arg0: index, %bound: index) {
+ %c1 = arith.constant 1 : index
+ %c0 = arith.constant 0 : index
+ %0 = gpu.warp_execute_on_lane_0(%arg0)[32]
+ args(%bound : index) -> (vector<4xf32>) {
+ ^bb0(%arg1: index):
+ %ini = "some_def"() : () -> (vector<128xf32>)
+ %3 = scf.for %arg3 = %c0 to %arg1 step %c1 iter_args(%arg4 = %ini) -> (vector<128xf32>) {
+ %acc = "some_def"(%arg4) : (vector<128xf32>) -> (vector<128xf32>)
+ scf.yield %acc : vector<128xf32>
+ }
+ gpu.yield %3 : vector<128xf32>
+ }
+ "some_use"(%0) : (vector<4xf32>) -> ()
+ return
+}
+
+// -----
// CHECK-PROP-LABEL: func @warp_scf_for_swap(
// CHECK-PROP: %[[INI:.*]]:2 = gpu.warp_execute_on_lane_0(%{{.*}})[32] -> (vector<4xf32>, vector<4xf32>) {
diff --git a/mlir/test/Dialect/WasmSSA/custom_parser/global.mlir b/mlir/test/Dialect/WasmSSA/custom_parser/global.mlir
index b9b3420..a25abbd 100644
--- a/mlir/test/Dialect/WasmSSA/custom_parser/global.mlir
+++ b/mlir/test/Dialect/WasmSSA/custom_parser/global.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s | FileCheck %s
module {
- wasmssa.import_global "from_js" from "env" as @global_0 nested : i32
+ wasmssa.import_global "from_js" from "env" as @global_0 : i32
wasmssa.global @global_1 i32 : {
%0 = wasmssa.const 10 : i32
@@ -21,7 +21,7 @@ module {
}
}
-// CHECK-LABEL: wasmssa.import_global "from_js" from "env" as @global_0 nested : i32
+// CHECK-LABEL: wasmssa.import_global "from_js" from "env" as @global_0 : i32
// CHECK-LABEL: wasmssa.global @global_1 i32 : {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i32
diff --git a/mlir/test/Dialect/WasmSSA/custom_parser/if.mlir b/mlir/test/Dialect/WasmSSA/custom_parser/if.mlir
index 01068cb..cee3c69 100644
--- a/mlir/test/Dialect/WasmSSA/custom_parser/if.mlir
+++ b/mlir/test/Dialect/WasmSSA/custom_parser/if.mlir
@@ -1,6 +1,6 @@
// RUN: mlir-opt %s | FileCheck %s
-// CHECK-LABEL: wasmssa.func nested @func_0(
+// CHECK-LABEL: wasmssa.func @func_0(
// CHECK-SAME: %[[ARG0:.*]]: !wasmssa<local ref to i32>) -> i32 {
// CHECK: %[[VAL_0:.*]] = wasmssa.local_get %[[ARG0]] : ref to i32
// CHECK: wasmssa.if %[[VAL_0]] : {
@@ -12,7 +12,7 @@
// CHECK: }> ^bb1
// CHECK: ^bb1(%[[VAL_3:.*]]: f32):
// CHECK: wasmssa.return %[[VAL_3]] : f32
-wasmssa.func nested @func_0(%arg0 : !wasmssa<local ref to i32>) -> i32 {
+wasmssa.func @func_0(%arg0 : !wasmssa<local ref to i32>) -> i32 {
%cond = wasmssa.local_get %arg0 : ref to i32
wasmssa.if %cond : {
%c0 = wasmssa.const 0.5 : f32
@@ -25,7 +25,7 @@ wasmssa.func nested @func_0(%arg0 : !wasmssa<local ref to i32>) -> i32 {
wasmssa.return %retVal : f32
}
-// CHECK-LABEL: wasmssa.func nested @func_1(
+// CHECK-LABEL: wasmssa.func @func_1(
// CHECK-SAME: %[[ARG0:.*]]: !wasmssa<local ref to i32>) -> i32 {
// CHECK: %[[VAL_0:.*]] = wasmssa.local_get %[[ARG0]] : ref to i32
// CHECK: %[[VAL_1:.*]] = wasmssa.local of type i32
@@ -38,7 +38,7 @@ wasmssa.func nested @func_0(%arg0 : !wasmssa<local ref to i32>) -> i32 {
// CHECK: ^bb1:
// CHECK: %[[VAL_4:.*]] = wasmssa.local_get %[[VAL_1]] : ref to i32
// CHECK: wasmssa.return %[[VAL_4]] : i32
-wasmssa.func nested @func_1(%arg0 : !wasmssa<local ref to i32>) -> i32 {
+wasmssa.func @func_1(%arg0 : !wasmssa<local ref to i32>) -> i32 {
%cond = wasmssa.local_get %arg0 : ref to i32
%var = wasmssa.local of type i32
%zero = wasmssa.const 0
diff --git a/mlir/test/Dialect/WasmSSA/custom_parser/import.mlir b/mlir/test/Dialect/WasmSSA/custom_parser/import.mlir
index 3cc0548..dc23229 100644
--- a/mlir/test/Dialect/WasmSSA/custom_parser/import.mlir
+++ b/mlir/test/Dialect/WasmSSA/custom_parser/import.mlir
@@ -5,13 +5,13 @@ module {
wasmssa.import_func "bar" from "my_module" as @func_1 {sym_visibility = "nested", type = (i32) -> ()}
wasmssa.import_table "table" from "my_module" as @table_0 {sym_visibility = "nested", type = !wasmssa<tabletype !wasmssa.funcref [2:]>}
wasmssa.import_mem "mem" from "my_module" as @mem_0 {limits = !wasmssa<limit[2:]>, sym_visibility = "nested"}
- wasmssa.import_global "glob" from "my_module" as @global_0 nested : i32
- wasmssa.import_global "glob_mut" from "my_other_module" as @global_1 mutable nested : i32
+ wasmssa.import_global "glob" from "my_module" as @global_0 : i32
+ wasmssa.import_global "glob_mut" from "my_other_module" as @global_1 mutable : i32
}
// CHECK-LABEL: wasmssa.import_func "foo" from "my_module" as @func_0 {sym_visibility = "nested", type = (i32) -> ()}
// CHECK: wasmssa.import_func "bar" from "my_module" as @func_1 {sym_visibility = "nested", type = (i32) -> ()}
// CHECK: wasmssa.import_table "table" from "my_module" as @table_0 {sym_visibility = "nested", type = !wasmssa<tabletype !wasmssa.funcref [2:]>}
// CHECK: wasmssa.import_mem "mem" from "my_module" as @mem_0 {limits = !wasmssa<limit[2:]>, sym_visibility = "nested"}
-// CHECK: wasmssa.import_global "glob" from "my_module" as @global_0 nested : i32
-// CHECK: wasmssa.import_global "glob_mut" from "my_other_module" as @global_1 mutable nested : i32
+// CHECK: wasmssa.import_global "glob" from "my_module" as @global_0 : i32
+// CHECK: wasmssa.import_global "glob_mut" from "my_other_module" as @global_1 mutable : i32
diff --git a/mlir/test/Dialect/WasmSSA/custom_parser/local.mlir b/mlir/test/Dialect/WasmSSA/custom_parser/local.mlir
index 3f6423f..f613ebf 100644
--- a/mlir/test/Dialect/WasmSSA/custom_parser/local.mlir
+++ b/mlir/test/Dialect/WasmSSA/custom_parser/local.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s | FileCheck %s
module {
- wasmssa.func nested @func_0() -> f32 {
+ wasmssa.func @func_0() -> f32 {
%0 = wasmssa.local of type f32
%1 = wasmssa.local of type f32
%2 = wasmssa.const 8.000000e+00 : f32
@@ -9,7 +9,7 @@ module {
%4 = wasmssa.add %2 %3 : f32
wasmssa.return %4 : f32
}
- wasmssa.func nested @func_1() -> i32 {
+ wasmssa.func @func_1() -> i32 {
%0 = wasmssa.local of type i32
%1 = wasmssa.local of type i32
%2 = wasmssa.const 8 : i32
@@ -17,13 +17,13 @@ module {
%4 = wasmssa.add %2 %3 : i32
wasmssa.return %4 : i32
}
- wasmssa.func nested @func_2(%arg0: !wasmssa<local ref to i32>) -> i32 {
+ wasmssa.func @func_2(%arg0: !wasmssa<local ref to i32>) -> i32 {
%0 = wasmssa.const 3 : i32
wasmssa.return %0 : i32
}
}
-// CHECK-LABEL: wasmssa.func nested @func_0() -> f32 {
+// CHECK-LABEL: wasmssa.func @func_0() -> f32 {
// CHECK: %[[VAL_0:.*]] = wasmssa.local of type f32
// CHECK: %[[VAL_1:.*]] = wasmssa.local of type f32
// CHECK: %[[VAL_2:.*]] = wasmssa.const 8.000000e+00 : f32
@@ -31,7 +31,7 @@ module {
// CHECK: %[[VAL_4:.*]] = wasmssa.add %[[VAL_2]] %[[VAL_3]] : f32
// CHECK: wasmssa.return %[[VAL_4]] : f32
-// CHECK-LABEL: wasmssa.func nested @func_1() -> i32 {
+// CHECK-LABEL: wasmssa.func @func_1() -> i32 {
// CHECK: %[[VAL_0:.*]] = wasmssa.local of type i32
// CHECK: %[[VAL_1:.*]] = wasmssa.local of type i32
// CHECK: %[[VAL_2:.*]] = wasmssa.const 8 : i32
@@ -39,7 +39,7 @@ module {
// CHECK: %[[VAL_4:.*]] = wasmssa.add %[[VAL_2]] %[[VAL_3]] : i32
// CHECK: wasmssa.return %[[VAL_4]] : i32
-// CHECK-LABEL: wasmssa.func nested @func_2(
+// CHECK-LABEL: wasmssa.func @func_2(
// CHECK-SAME: %[[ARG0:.*]]: !wasmssa<local ref to i32>) -> i32 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 3 : i32
// CHECK: wasmssa.return %[[VAL_0]] : i32
diff --git a/mlir/test/Dialect/WasmSSA/custom_parser/memory.mlir b/mlir/test/Dialect/WasmSSA/custom_parser/memory.mlir
index 47551db..ca6ebe0 100644
--- a/mlir/test/Dialect/WasmSSA/custom_parser/memory.mlir
+++ b/mlir/test/Dialect/WasmSSA/custom_parser/memory.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s | FileCheck %s
-// CHECK: wasmssa.memory @mem0 public !wasmssa<limit[0: 65536]>
-wasmssa.memory @mem0 public !wasmssa<limit[0:65536]>
-
-// CHECK: wasmssa.memory @mem1 nested !wasmssa<limit[512:]>
+// CHECK: wasmssa.memory @mem1 !wasmssa<limit[512:]>
wasmssa.memory @mem1 !wasmssa<limit[512:]>
+
+// CHECK: wasmssa.memory exported @mem2 !wasmssa<limit[0: 65536]>
+wasmssa.memory exported @mem2 !wasmssa<limit[0:65536]>
diff --git a/mlir/test/Dialect/WasmSSA/custom_parser/table.mlir b/mlir/test/Dialect/WasmSSA/custom_parser/table.mlir
index 5a874f4..ea630de 100644
--- a/mlir/test/Dialect/WasmSSA/custom_parser/table.mlir
+++ b/mlir/test/Dialect/WasmSSA/custom_parser/table.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s | FileCheck %s
-// CHECK: wasmssa.table @tab0 public !wasmssa<tabletype !wasmssa.externref [0: 65536]>
-wasmssa.table @tab0 public !wasmssa<tabletype !wasmssa.externref [0:65536]>
+// CHECK: wasmssa.table exported @tab0 !wasmssa<tabletype !wasmssa.externref [0: 65536]>
+wasmssa.table exported @tab0 !wasmssa<tabletype !wasmssa.externref [0:65536]>
-// CHECK: wasmssa.table @tab1 nested !wasmssa<tabletype !wasmssa.funcref [348:]>
+// CHECK: wasmssa.table @tab1 !wasmssa<tabletype !wasmssa.funcref [348:]>
wasmssa.table @tab1 !wasmssa<tabletype !wasmssa.funcref [348:]>
diff --git a/mlir/test/Dialect/WasmSSA/extend-invalid.mlir b/mlir/test/Dialect/WasmSSA/extend-invalid.mlir
index 8d78280..7687e5f 100644
--- a/mlir/test/Dialect/WasmSSA/extend-invalid.mlir
+++ b/mlir/test/Dialect/WasmSSA/extend-invalid.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s -split-input-file -verify-diagnostics
-wasmssa.func nested @extend_low_64() -> i32 {
+wasmssa.func @extend_low_64() -> i32 {
%0 = wasmssa.const 10 : i32
// expected-error@+1 {{extend op can only take 8, 16 or 32 bits. Got 64}}
%1 = wasmssa.extend 64 low bits from %0: i32
@@ -10,7 +10,7 @@ wasmssa.func nested @extend_low_64() -> i32 {
// -----
-wasmssa.func nested @extend_too_much() -> i32 {
+wasmssa.func @extend_too_much() -> i32 {
%0 = wasmssa.const 10 : i32
// expected-error@+1 {{trying to extend the 32 low bits from a 'i32' value is illegal}}
%1 = wasmssa.extend 32 low bits from %0: i32
diff --git a/mlir/test/Dialect/WasmSSA/global-invalid.mlir b/mlir/test/Dialect/WasmSSA/global-invalid.mlir
index b9cafd8..c5bc606 100644
--- a/mlir/test/Dialect/WasmSSA/global-invalid.mlir
+++ b/mlir/test/Dialect/WasmSSA/global-invalid.mlir
@@ -13,7 +13,7 @@ module {
// -----
module {
- wasmssa.import_global "glob" from "my_module" as @global_0 mutable nested : i32
+ wasmssa.import_global "glob" from "my_module" as @global_0 mutable : i32
wasmssa.global @global_1 i32 : {
// expected-error@+1 {{global.get op is considered constant if it's referring to a import.global symbol marked non-mutable}}
%0 = wasmssa.global_get @global_0 : i32
@@ -30,3 +30,13 @@ module {
wasmssa.return %0 : i32
}
}
+
+// -----
+
+module {
+ // expected-error@+1 {{expecting either `exported` or symbol name. got exproted}}
+ wasmssa.global exproted @global_1 i32 : {
+ %0 = wasmssa.const 17 : i32
+ wasmssa.return %0 : i32
+ }
+}
diff --git a/mlir/test/Dialect/WasmSSA/locals-invalid.mlir b/mlir/test/Dialect/WasmSSA/locals-invalid.mlir
index 35c590b..eaad80e 100644
--- a/mlir/test/Dialect/WasmSSA/locals-invalid.mlir
+++ b/mlir/test/Dialect/WasmSSA/locals-invalid.mlir
@@ -1,6 +1,6 @@
// RUN: mlir-opt %s -split-input-file -verify-diagnostics
-wasmssa.func nested @local_set_err(%arg0: !wasmssa<local ref to i32>) -> i64 {
+wasmssa.func @local_set_err(%arg0: !wasmssa<local ref to i32>) -> i64 {
%0 = wasmssa.const 3 : i64
// expected-error@+1 {{input type and result type of local.set do not match}}
wasmssa.local_set %arg0 : ref to i32 to %0 : i64
@@ -9,7 +9,7 @@ wasmssa.func nested @local_set_err(%arg0: !wasmssa<local ref to i32>) -> i64 {
// -----
-wasmssa.func nested @local_tee_err(%arg0: !wasmssa<local ref to i32>) -> i32 {
+wasmssa.func @local_tee_err(%arg0: !wasmssa<local ref to i32>) -> i32 {
%0 = wasmssa.const 3 : i64
// expected-error@+1 {{input type and output type of local.tee do not match}}
%1 = wasmssa.local_tee %arg0 : ref to i32 to %0 : i64
diff --git a/mlir/test/Integration/Dialect/XeGPU/LANE/lit.local.cfg b/mlir/test/Integration/Dialect/XeGPU/LANE/lit.local.cfg
new file mode 100644
index 0000000..d0d51c6
--- /dev/null
+++ b/mlir/test/Integration/Dialect/XeGPU/LANE/lit.local.cfg
@@ -0,0 +1,4 @@
+if not config.run_xevm_tests:
+ config.unsupported = True
+if not config.enable_levelzero_runner:
+ config.unsupported = True
diff --git a/mlir/test/Integration/Dialect/XeGPU/LANE/simple_gemm.mlir b/mlir/test/Integration/Dialect/XeGPU/LANE/simple_gemm.mlir
new file mode 100644
index 0000000..ffe29ef
--- /dev/null
+++ b/mlir/test/Integration/Dialect/XeGPU/LANE/simple_gemm.mlir
@@ -0,0 +1,121 @@
+// RUN: mlir-opt %s --gpu-lower-to-xevm-pipeline="xegpu-op-level=lane" \
+// RUN: | mlir-runner \
+// RUN: --shared-libs=%mlir_levelzero_runtime \
+// RUN: --shared-libs=%mlir_runner_utils \
+// RUN: --entry-point-result=void \
+// RUN: | FileCheck %s
+
+module @gemm attributes {gpu.container_module} {
+ gpu.module @kernel {
+ gpu.func @simple_gemm(%a: memref<256x256xf16>, %b: memref<256x256xf16>, %c: memref<256x256xf32>) kernel {
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %c8 = arith.constant 8 : index
+ %c16 = arith.constant 16 : index
+ %c32 = arith.constant 32 : index
+ %c256 = arith.constant 256 : index
+ %block_x = gpu.block_id x
+ %block_y = gpu.block_id y
+ %x_block_offset = arith.muli %block_x, %c8 : index
+ %y_block_offset = arith.muli %block_y, %c16 : index
+
+ %c_tdesc = xegpu.create_nd_tdesc %c : memref<256x256xf32> -> !xegpu.tensor_desc<8x16xf32>
+ %c_init_value = xegpu.load_nd %c_tdesc[%x_block_offset, %y_block_offset] : !xegpu.tensor_desc<8x16xf32> -> vector<8xf32>
+ %a_tdesc = xegpu.create_nd_tdesc %a : memref<256x256xf16> -> !xegpu.tensor_desc<8x16xf16>
+ %b_tdesc = xegpu.create_nd_tdesc %b : memref<256x256xf16> -> !xegpu.tensor_desc<16x16xf16>
+
+ %r = scf.for %k = %c0 to %c256 step %c16 iter_args(%arg_c = %c_init_value) -> (vector<8xf32>) {
+ %a_val = xegpu.load_nd %a_tdesc[%x_block_offset, %k] : !xegpu.tensor_desc<8x16xf16> -> vector<8xf16>
+ %b_val = xegpu.load_nd %b_tdesc[%k, %y_block_offset] : !xegpu.tensor_desc<16x16xf16> -> vector<16xf16>
+ %dpas = xegpu.dpas %a_val, %b_val, %arg_c : vector<8xf16>, vector<16xf16>, vector<8xf32> -> vector<8xf32>
+ scf.yield %dpas : vector<8xf32>
+ }
+ xegpu.store_nd %r, %c_tdesc[%x_block_offset, %y_block_offset] <{l1_hint = #xegpu.cache_hint<write_back>, l2_hint = #xegpu.cache_hint<uncached>}>: vector<8xf32>, !xegpu.tensor_desc<8x16xf32>
+ gpu.return
+ }
+ }
+
+ func.func @test(%a : memref<256x256xf16>, %b : memref<256x256xf16>, %c : memref<256x256xf32>) -> memref<256x256xf32> attributes {llvm.emit_c_interface} {
+ %c1 = arith.constant 1 : index
+ %c16 = arith.constant 16 : index
+ %c32 = arith.constant 32 : index
+ %memref_a = gpu.alloc () : memref<256x256xf16>
+ gpu.memcpy %memref_a, %a : memref<256x256xf16>, memref<256x256xf16>
+ %memref_b = gpu.alloc () : memref<256x256xf16>
+ gpu.memcpy %memref_b, %b : memref<256x256xf16>, memref<256x256xf16>
+ %memref_c = gpu.alloc () : memref<256x256xf32>
+ gpu.memcpy %memref_c, %c : memref<256x256xf32>, memref<256x256xf32>
+ gpu.launch_func @kernel::@simple_gemm blocks in (%c32, %c16, %c1) threads in (%c16, %c1, %c1) args(%memref_a : memref<256x256xf16>, %memref_b : memref<256x256xf16>, %memref_c : memref<256x256xf32>)
+ gpu.wait // Wait for the kernel to finish.
+ gpu.memcpy %c, %memref_c : memref<256x256xf32>, memref<256x256xf32>
+ gpu.dealloc %memref_a : memref<256x256xf16>
+ gpu.dealloc %memref_b : memref<256x256xf16>
+ gpu.dealloc %memref_c : memref<256x256xf32>
+ return %c : memref<256x256xf32>
+ }
+
+ func.func @main() attributes {llvm.emit_c_interface} {
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %c1_f16 = arith.constant 1.0 : f16
+ %c2_f16 = arith.constant 2.0 : f16
+ %c256 = arith.constant 256 : index
+ %cf_0 = arith.constant 0.0 : f16
+ %cf_1 = arith.constant 1.0 : f16
+ %A = memref.alloc() : memref<256x256xf16>
+ %B = memref.alloc() : memref<256x256xf16>
+ %C = memref.alloc() : memref<256x256xf32>
+ %C_ref = memref.alloc() : memref<256x256xf32>
+ %c_gen_int = arith.constant 0 : i1
+ %cf_lower = arith.constant -0.5 : f32
+ %cf_upper = arith.constant 0.5 : f32
+
+ // Initialize matrix A ; A[i, j] = j
+ scf.for %i = %c0 to %c256 step %c1 {
+ scf.for %j = %c0 to %c256 step %c1 {
+ %t = index.castu %j : index to i16
+ %val = arith.uitofp %t : i16 to f16
+ memref.store %val, %A[%i, %j] : memref<256x256xf16>
+ }
+ }
+
+ // Initialize the B matrix.
+ // Make matrix B an identity matrix.
+ scf.for %i = %c0 to %c256 step %c1 {
+ scf.for %j = %c0 to %c256 step %c1 {
+ %i_i32 = index.castu %i : index to i32
+ %j_i32 = index.castu %j : index to i32
+ %i_j_same = arith.cmpi eq, %i_i32, %j_i32 : i32
+
+ scf.if %i_j_same {
+ memref.store %cf_1, %B[%i, %j] : memref<256x256xf16>
+ } else {
+ memref.store %cf_0, %B[%i, %j] : memref<256x256xf16>
+ }
+ }
+ }
+
+ // Initialize matrix C and C_ref ; C[i, j] = 0
+ %c0_f32 = arith.constant 0.0 : f32
+ scf.for %i = %c0 to %c256 step %c1 {
+ scf.for %j = %c0 to %c256 step %c1 {
+ memref.store %c0_f32, %C[%i, %j] : memref<256x256xf32>
+ memref.store %c0_f32, %C_ref[%i, %j] : memref<256x256xf32>
+ }
+ }
+
+ // Run GPU version.
+ %2 = call @test(%A, %B, %C) : (memref<256x256xf16>, memref<256x256xf16>, memref<256x256xf32>) -> memref<256x256xf32>
+ %gpu_result_cast = memref.cast %2 : memref<256x256xf32> to memref<*xf32>
+
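+ // B is the identity matrix and A[i, j] = j (C starts at zero), so the
+ // result equals A: every printed row is 0, 1, ..., 255.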
+ // CHECK: Unranked Memref base@ = 0x{{[0-9a-f]+}}
+ // CHECK-COUNT-256: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255]
+ call @printMemrefF32(%gpu_result_cast) : (memref<*xf32>) -> ()
+ memref.dealloc %A : memref<256x256xf16>
+ memref.dealloc %B : memref<256x256xf16>
+ memref.dealloc %C : memref<256x256xf32>
+ memref.dealloc %C_ref : memref<256x256xf32>
+ return
+ }
+ func.func private @printMemrefF32(memref<*xf32>) attributes {llvm.emit_c_interface}
+}
diff --git a/mlir/test/Integration/Dialect/XeGPU/SG/lit.local.cfg b/mlir/test/Integration/Dialect/XeGPU/SG/lit.local.cfg
new file mode 100644
index 0000000..d0d51c6
--- /dev/null
+++ b/mlir/test/Integration/Dialect/XeGPU/SG/lit.local.cfg
@@ -0,0 +1,4 @@
+if not config.run_xevm_tests:
+ config.unsupported = True
+if not config.enable_levelzero_runner:
+ config.unsupported = True
diff --git a/mlir/test/Integration/Dialect/XeGPU/SG/simple_gemm.mlir b/mlir/test/Integration/Dialect/XeGPU/SG/simple_gemm.mlir
new file mode 100644
index 0000000..877edf4
--- /dev/null
+++ b/mlir/test/Integration/Dialect/XeGPU/SG/simple_gemm.mlir
@@ -0,0 +1,120 @@
+// RUN: mlir-opt %s --gpu-lower-to-xevm-pipeline="xegpu-op-level=subgroup" \
+// RUN: | mlir-runner \
+// RUN: --shared-libs=%mlir_levelzero_runtime \
+// RUN: --shared-libs=%mlir_runner_utils \
+// RUN: --entry-point-result=void \
+// RUN: | FileCheck %s
+
+module @gemm attributes {gpu.container_module} {
+ gpu.module @kernel {
+ gpu.func @simple_gemm(%a: memref<256x256xf16>, %b: memref<256x256xf16>, %c: memref<256x256xf32>) kernel {
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %c8 = arith.constant 8 : index
+ %c16 = arith.constant 16 : index
+ %c32 = arith.constant 32 : index
+ %c256 = arith.constant 256 : index
+ %block_x = gpu.block_id x
+ %block_y = gpu.block_id y
+ %x_block_offset = arith.muli %block_x, %c8 : index
+ %y_block_offset = arith.muli %block_y, %c16 : index
+
+ %c_tdesc = xegpu.create_nd_tdesc %c : memref<256x256xf32> -> !xegpu.tensor_desc<8x16xf32>
+ %c_init_value = xegpu.load_nd %c_tdesc[%x_block_offset, %y_block_offset] : !xegpu.tensor_desc<8x16xf32> -> vector<8x16xf32>
+ %a_tdesc = xegpu.create_nd_tdesc %a : memref<256x256xf16> -> !xegpu.tensor_desc<8x16xf16>
+ %b_tdesc = xegpu.create_nd_tdesc %b : memref<256x256xf16> -> !xegpu.tensor_desc<16x16xf16>
+
+ %r = scf.for %k = %c0 to %c256 step %c16 iter_args(%arg_c = %c_init_value) -> (vector<8x16xf32>) {
+ %a_val = xegpu.load_nd %a_tdesc[%x_block_offset, %k] : !xegpu.tensor_desc<8x16xf16> -> vector<8x16xf16>
+ %b_val = xegpu.load_nd %b_tdesc[%k, %y_block_offset] : !xegpu.tensor_desc<16x16xf16> -> vector<16x16xf16>
+ %dpas = xegpu.dpas %a_val, %b_val, %arg_c : vector<8x16xf16>, vector<16x16xf16>, vector<8x16xf32> -> vector<8x16xf32>
+ scf.yield %dpas : vector<8x16xf32>
+ }
+ xegpu.store_nd %r, %c_tdesc[%x_block_offset, %y_block_offset] <{l1_hint = #xegpu.cache_hint<write_back>, l2_hint = #xegpu.cache_hint<uncached>}>: vector<8x16xf32>, !xegpu.tensor_desc<8x16xf32>
+ gpu.return
+ }
+ }
+
+ func.func @test(%a : memref<256x256xf16>, %b : memref<256x256xf16>, %c : memref<256x256xf32>) -> memref<256x256xf32> attributes {llvm.emit_c_interface} {
+ %c1 = arith.constant 1 : index
+ %c16 = arith.constant 16 : index
+ %c32 = arith.constant 32 : index
+ %memref_a = gpu.alloc () : memref<256x256xf16>
+ gpu.memcpy %memref_a, %a : memref<256x256xf16>, memref<256x256xf16>
+ %memref_b = gpu.alloc () : memref<256x256xf16>
+ gpu.memcpy %memref_b, %b : memref<256x256xf16>, memref<256x256xf16>
+ %memref_c = gpu.alloc () : memref<256x256xf32>
+ gpu.memcpy %memref_c, %c : memref<256x256xf32>, memref<256x256xf32>
+ gpu.launch_func @kernel::@simple_gemm blocks in (%c32, %c16, %c1) threads in (%c16, %c1, %c1) args(%memref_a : memref<256x256xf16>, %memref_b : memref<256x256xf16>, %memref_c : memref<256x256xf32>)
+ gpu.wait // Wait for the kernel to finish.
+ gpu.memcpy %c, %memref_c : memref<256x256xf32>, memref<256x256xf32>
+ gpu.dealloc %memref_a : memref<256x256xf16>
+ gpu.dealloc %memref_b : memref<256x256xf16>
+ gpu.dealloc %memref_c : memref<256x256xf32>
+ return %c : memref<256x256xf32>
+ }
+
+
+ func.func @main() attributes {llvm.emit_c_interface} {
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %c1_f16 = arith.constant 1.0 : f16
+ %c2_f16 = arith.constant 2.0 : f16
+ %c256 = arith.constant 256 : index
+ %cf_0 = arith.constant 0.0 : f16
+ %cf_1 = arith.constant 1.0 : f16
+ %A = memref.alloc() : memref<256x256xf16>
+ %B = memref.alloc() : memref<256x256xf16>
+ %C = memref.alloc() : memref<256x256xf32>
+ %C_ref = memref.alloc() : memref<256x256xf32>
+ %c_gen_int = arith.constant 0 : i1
+ %cf_lower = arith.constant -0.5 : f32
+ %cf_upper = arith.constant 0.5 : f32
+ // Initialize matrix A ; A[i, j] = j
+ scf.for %i = %c0 to %c256 step %c1 {
+ scf.for %j = %c0 to %c256 step %c1 {
+ %t = index.castu %j : index to i16
+ %val = arith.uitofp %t : i16 to f16
+ memref.store %val, %A[%i, %j] : memref<256x256xf16>
+ }
+ }
+
+ // Initialize the B matrix
+ // Make matrix B an identity matrix
+ scf.for %i = %c0 to %c256 step %c1 {
+ scf.for %j = %c0 to %c256 step %c1 {
+ %i_i32 = index.castu %i : index to i32
+ %j_i32 = index.castu %j : index to i32
+ %i_j_same = arith.cmpi eq, %i_i32, %j_i32 : i32
+
+ scf.if %i_j_same {
+ memref.store %cf_1, %B[%i, %j] : memref<256x256xf16>
+ } else {
+ memref.store %cf_0, %B[%i, %j] : memref<256x256xf16>
+ }
+ }
+ }
+ // Initialize matrix C and C_ref ; C[i, j] = 0
+ %c0_f32 = arith.constant 0.0 : f32
+ scf.for %i = %c0 to %c256 step %c1 {
+ scf.for %j = %c0 to %c256 step %c1 {
+ memref.store %c0_f32, %C[%i, %j] : memref<256x256xf32>
+ memref.store %c0_f32, %C_ref[%i, %j] : memref<256x256xf32>
+ }
+ }
+
+ // Run the GPU version.
+ %2 = call @test(%A, %B, %C) : (memref<256x256xf16>, memref<256x256xf16>, memref<256x256xf32>) -> memref<256x256xf32>
+ %cast_C = memref.cast %2 : memref<256x256xf32> to memref<*xf32>
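+ // B is the identity matrix and A[i, j] = j (C starts at zero), so the
+ // result equals A: every printed row is 0, 1, ..., 255.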
+ // CHECK: Unranked Memref base@ = 0x{{[0-9a-f]+}}
+ // CHECK-COUNT-256: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255]
+ call @printMemrefF32(%cast_C) : (memref<*xf32>) -> ()
+
+ memref.dealloc %A : memref<256x256xf16>
+ memref.dealloc %B : memref<256x256xf16>
+ memref.dealloc %C : memref<256x256xf32>
+ memref.dealloc %C_ref : memref<256x256xf32>
+ return
+ }
+ func.func private @printMemrefF32(memref<*xf32>) attributes {llvm.emit_c_interface}
+}
diff --git a/mlir/test/Integration/Dialect/XeGPU/WG/lit.local.cfg b/mlir/test/Integration/Dialect/XeGPU/WG/lit.local.cfg
new file mode 100644
index 0000000..d0d51c6
--- /dev/null
+++ b/mlir/test/Integration/Dialect/XeGPU/WG/lit.local.cfg
@@ -0,0 +1,4 @@
+if not config.run_xevm_tests:
+ config.unsupported = True
+if not config.enable_levelzero_runner:
+ config.unsupported = True
diff --git a/mlir/test/Integration/Dialect/XeGPU/WG/simple_gemm.mlir b/mlir/test/Integration/Dialect/XeGPU/WG/simple_gemm.mlir
new file mode 100644
index 0000000..3f2fff9
--- /dev/null
+++ b/mlir/test/Integration/Dialect/XeGPU/WG/simple_gemm.mlir
@@ -0,0 +1,151 @@
+// RUN: mlir-opt %s --gpu-lower-to-xevm-pipeline="xegpu-op-level=workgroup" \
+// RUN: | mlir-runner \
+// RUN: --shared-libs=%mlir_levelzero_runtime \
+// RUN: --shared-libs=%mlir_runner_utils \
+// RUN: --entry-point-result=void \
+// RUN: | FileCheck %s
+
+#a = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32], inst_data = [8, 16]>
+#b = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 64], inst_data = [16, 16]>
+#c = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 64], inst_data = [8, 16]>
+#a_prefetch = #xegpu.layout<sg_layout = [32, 1], sg_data = [8, 32], inst_data = [8, 16]>
+#b_prefetch = #xegpu.layout<sg_layout = [4, 8], sg_data = [8, 32], inst_data = [8, 16]>
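+// Note on the layouts above (informal reading): sg_layout is the grid of subgroups
+// within the workgroup, sg_data is the tile owned by each subgroup, and inst_data
+// is the tile shape consumed by a single instruction.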
+module @gemm attributes {gpu.container_module} {
+ func.func @test(%A: memref<256x256xf16>, %B: memref<256x256xf16>, %C: memref<256x256xf32>) -> memref<256x256xf32> attributes {llvm.emit_c_interface} {
+ %c1 = arith.constant 1 : index
+ %c4 = arith.constant 4 : index
+ %c8 = arith.constant 8 : index
+ %c16 = arith.constant 16 : index
+ %c32 = arith.constant 32 : index
+ %c64 = arith.constant 64 : index
+ %c128 = arith.constant 128 : index
+ %c512 = arith.constant 512 : index
+ %A_gpu = gpu.alloc () : memref<256x256xf16>
+ gpu.memcpy %A_gpu, %A : memref<256x256xf16>, memref<256x256xf16>
+ %B_gpu = gpu.alloc () : memref<256x256xf16>
+ gpu.memcpy %B_gpu, %B : memref<256x256xf16>, memref<256x256xf16>
+ %C_gpu = gpu.alloc () : memref<256x256xf32>
+ gpu.memcpy %C_gpu, %C : memref<256x256xf32>, memref<256x256xf32>
+ // NOTE: We can't use an [8, 64] wi thread layout that follows
+ // the SG thread layout of [8, 4], because the runtime linearizes
+ // the x dimension first (we need the y dimension linearized first).
+ // So we use the linearized thread layout of [512, 1] wi threads instead.
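+ // With 512 linear work-items and an [8, 4] sg_layout (32 subgroups), each subgroup
+ // gets 512 / 32 = 16 work-items; illustratively, sg_id = tid / 16 with subgroup
+ // coordinates (sg_id / 4, sg_id % 4) in the [8, 4] grid, assuming row-major ordering.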
+ gpu.launch_func @test_kernel::@test_kernel blocks in (%c1, %c1, %c1) threads in (%c512, %c1, %c1) args(%A_gpu : memref<256x256xf16>, %B_gpu : memref<256x256xf16>, %C_gpu : memref<256x256xf32>)
+ gpu.wait // Wait for the kernel to finish.
+ gpu.memcpy %C, %C_gpu : memref<256x256xf32>, memref<256x256xf32>
+ gpu.dealloc %A_gpu : memref<256x256xf16>
+ gpu.dealloc %B_gpu : memref<256x256xf16>
+ gpu.dealloc %C_gpu : memref<256x256xf32>
+ return %C : memref<256x256xf32>
+ }
+
+ gpu.module @test_kernel {
+ gpu.func @test_kernel(%A: memref<256x256xf16>, %B: memref<256x256xf16>, %C: memref<256x256xf32>) kernel {
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %c32 = arith.constant 32 : index
+ %c64 = arith.constant 64 : index
+ %c96 = arith.constant 96 : index
+ %c256 = arith.constant 256 : index
+ %c4096 = arith.constant 4096 : index
+ %block_id_x = gpu.block_id x
+ %block_id_y = gpu.block_id y
+ %m = arith.muli %block_id_x, %c256 : index
+ %n = arith.muli %block_id_y, %c256 : index
+ %c_tdesc = xegpu.create_nd_tdesc %C : memref<256x256xf32> -> !xegpu.tensor_desc<256x256xf32, #c>
+ %c_init_value = xegpu.load_nd %c_tdesc[%m, %n] : !xegpu.tensor_desc<256x256xf32, #c> -> vector<256x256xf32>
+ %a_tdesc = xegpu.create_nd_tdesc %A : memref<256x256xf16> -> !xegpu.tensor_desc<256x32xf16, #a>
+ %b_tdesc = xegpu.create_nd_tdesc %B : memref<256x256xf16> -> !xegpu.tensor_desc<32x256xf16, #b>
+ // Prefetch A 3 times.
+ %a_prefetch_tdesc = xegpu.create_nd_tdesc %A : memref<256x256xf16> -> !xegpu.tensor_desc<256x32xf16, #a_prefetch>
+ xegpu.prefetch_nd %a_prefetch_tdesc[%m, %c0] : !xegpu.tensor_desc<256x32xf16, #a_prefetch>
+ xegpu.prefetch_nd %a_prefetch_tdesc[%m, %c32] : !xegpu.tensor_desc<256x32xf16, #a_prefetch>
+ xegpu.prefetch_nd %a_prefetch_tdesc[%m, %c64] : !xegpu.tensor_desc<256x32xf16, #a_prefetch>
+ // Prefetch B 3 times.
+ %b_prefetch_tdesc = xegpu.create_nd_tdesc %B : memref<256x256xf16> -> !xegpu.tensor_desc<32x256xf16, #b_prefetch>
+ xegpu.prefetch_nd %b_prefetch_tdesc[%c0, %n] : !xegpu.tensor_desc<32x256xf16, #b_prefetch>
+ xegpu.prefetch_nd %b_prefetch_tdesc[%c32, %n] : !xegpu.tensor_desc<32x256xf16, #b_prefetch>
+ xegpu.prefetch_nd %b_prefetch_tdesc[%c64, %n] : !xegpu.tensor_desc<32x256xf16, #b_prefetch>
+
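+ // The loop below prefetches the tile at k + 96, i.e. three 32-wide k-steps ahead,
+ // matching the three initial prefetches issued above.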
+ %out = scf.for %k = %c0 to %c256 step %c32
+ iter_args(%c_value = %c_init_value)
+ -> (vector<256x256xf32>) {
+ %a_value = xegpu.load_nd %a_tdesc[%m, %k] : !xegpu.tensor_desc<256x32xf16, #a> -> vector<256x32xf16>
+ %b_value = xegpu.load_nd %b_tdesc[%k, %n] : !xegpu.tensor_desc<32x256xf16, #b> -> vector<32x256xf16>
+ // Prefetch next tiles.
+ %prefetch_offset = arith.addi %k, %c96 : index
+ xegpu.prefetch_nd %a_prefetch_tdesc[%m, %prefetch_offset] : !xegpu.tensor_desc<256x32xf16, #a_prefetch>
+ xegpu.prefetch_nd %b_prefetch_tdesc[%prefetch_offset, %n] : !xegpu.tensor_desc<32x256xf16, #b_prefetch>
+ %c_new_value = xegpu.dpas %a_value, %b_value, %c_value {layout_result_0 = #c}
+ : vector<256x32xf16>, vector<32x256xf16>, vector<256x256xf32> -> vector<256x256xf32>
+ scf.yield %c_new_value : vector<256x256xf32>
+ }
+ xegpu.store_nd %out, %c_tdesc[%m, %n] : vector<256x256xf32>, !xegpu.tensor_desc<256x256xf32, #c>
+ gpu.return
+ }
+ }
+
+ func.func @main() attributes {llvm.emit_c_interface} {
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %c1_f16 = arith.constant 1.0 : f16
+ %c2_f16 = arith.constant 2.0 : f16
+ %c256 = arith.constant 256 : index
+ %cf_0 = arith.constant 0.0 : f16
+ %cf_1 = arith.constant 1.0 : f16
+ %A = memref.alloc() : memref<256x256xf16>
+ %B = memref.alloc() : memref<256x256xf16>
+ %C = memref.alloc() : memref<256x256xf32>
+ %C_ref = memref.alloc() : memref<256x256xf32>
+ %c_gen_int = arith.constant 0 : i1
+ %cf_lower = arith.constant -0.5 : f32
+ %cf_upper = arith.constant 0.5 : f32
+ // Initialize matrix A ; A[i, j] = j
+ scf.for %i = %c0 to %c256 step %c1 {
+ scf.for %j = %c0 to %c256 step %c1 {
+ %t = index.castu %j : index to i16
+ %val = arith.uitofp %t : i16 to f16
+ memref.store %val, %A[%i, %j] : memref<256x256xf16>
+ }
+ }
+
+ // Initialize matrix B as the identity matrix
+ scf.for %i = %c0 to %c256 step %c1 {
+ scf.for %j = %c0 to %c256 step %c1 {
+ %i_i32 = index.castu %i : index to i32
+ %j_i32 = index.castu %j : index to i32
+ %i_j_same = arith.cmpi eq, %i_i32, %j_i32 : i32
+
+ scf.if %i_j_same {
+ memref.store %cf_1, %B[%i, %j] : memref<256x256xf16>
+ } else {
+ memref.store %cf_0, %B[%i, %j] : memref<256x256xf16>
+ }
+ }
+ }
+
+ // Initialize matrix C and C_ref ; C[i, j] = 0
+ %c0_f32 = arith.constant 0.0 : f32
+ scf.for %i = %c0 to %c256 step %c1 {
+ scf.for %j = %c0 to %c256 step %c1 {
+ memref.store %c0_f32, %C[%i, %j] : memref<256x256xf32>
+ memref.store %c0_f32, %C_ref[%i, %j] : memref<256x256xf32>
+ }
+ }
+
+ // Run GPU version.
+ %2 = call @test(%A, %B, %C) : (memref<256x256xf16>, memref<256x256xf16>, memref<256x256xf32>) -> memref<256x256xf32>
+ %gpu_result_cast = memref.cast %2 : memref<256x256xf32> to memref<*xf32>
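+ // Expected result: since B is the identity and A[i, j] = j, C = A x B equals A,
+ // so every printed row should be [0, 1, ..., 255].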
+ // CHECK: Unranked Memref base@ = 0x{{[0-9a-f]+}}
+ // CHECK-COUNT-256: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255]
+ call @printMemrefF32(%gpu_result_cast) : (memref<*xf32>) -> ()
+
+ memref.dealloc %A : memref<256x256xf16>
+ memref.dealloc %B : memref<256x256xf16>
+ memref.dealloc %C : memref<256x256xf32>
+ memref.dealloc %C_ref : memref<256x256xf32>
+ return
+ }
+ func.func private @printMemrefF32(memref<*xf32>) attributes {llvm.emit_c_interface}
+}
diff --git a/mlir/test/Target/LLVMIR/rocdl.mlir b/mlir/test/Target/LLVMIR/rocdl.mlir
index 6536fac..30126f6 100644
--- a/mlir/test/Target/LLVMIR/rocdl.mlir
+++ b/mlir/test/Target/LLVMIR/rocdl.mlir
@@ -872,9 +872,11 @@ llvm.func @rocdl.mfma.scale.f32.16x16x128.f8f6f4(%arg0 : i32,
}
llvm.func @rocdl.wmma(%arg0 : vector<8xf32>, %arg1 : vector<16 x f16>, %arg2 : vector<16 x i16>, %arg3 : vector<8 x i32>,
- %arg4 : vector<2xi32>, %arg5 : vector<4xi32>, %arg6 : vector<4xf32>, %arg7 : vector<8xf16>, %arg8 : vector<8xi16>) -> vector<8xf32> {
+ %arg4 : vector<2xi32>, %arg5 : vector<4xi32>, %arg6 : vector<4xf32>, %arg7 : vector<8xf16>, %arg8 : vector<8xi16>,
+ %arg9 : vector<32xf16>, %arg10 : vector<16xf32>, %arg11 : vector<4xf32>, %arg12 : vector<32xf32>, %arg13 : vector<64xf32>,
+ %arg14 : vector<64xi32>, %arg15 : vector<64xf16>, %arg16 : vector<16xbf16>, %arg17 : vector<32xbf16>) -> vector<8xf32> {
%zero = llvm.mlir.constant(false) : i1
-
+ %zero_i16 = llvm.mlir.constant(0 : i16) : i16
// ---- Wave32 -----
// f16 -> f32
@@ -905,6 +907,83 @@ llvm.func @rocdl.wmma(%arg0 : vector<8xf32>, %arg1 : vector<16 x f16>, %arg2 : v
// CHECK: call <8 x i32> @llvm.amdgcn.wmma.i32.16x16x32.iu4.v8i32.v2i32(i1 {{.*}}, <2 x i32> %{{.*}}, i1 {{.*}}, <2 x i32> %{{.*}}, <8 x i32> %{{.*}}, i1 {{.*}})
%r6.gfx12 = rocdl.wmma.i32.16x16x32.iu4 %zero, %arg4, %zero, %arg4, %arg3, %zero : (i1, vector<2xi32>, i1, vector<2xi32>, vector<8xi32>, i1) -> vector<8xi32>
+ // f32 -> f32
+ // CHECK: call <4 x float> @llvm.amdgcn.wmma.f32.16x16x4.f32.v4f32.v16f32(i1 {{.*}}, <16 x float> %{{.*}}, i1 {{.*}}, <16 x float> %{{.*}}, i16 0, <4 x float> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r1.gfx1250 = rocdl.wmma.f32.16x16x4.f32 %zero, %arg10, %zero, %arg10, %zero_i16, %arg11, %zero, %zero : (i1, vector<16xf32>, i1, vector<16xf32>, i16, vector<4xf32>, i1, i1) -> vector<4xf32>
+
+ // f16 -> f32
+ // CHECK: call <32 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v32f32.v16f16(i1 {{.*}}, <16 x half> %{{.*}}, i1 {{.*}}, <16 x half> %{{.*}}, i16 0, <32 x float> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r2.gfx1250 = rocdl.wmma.f32.16x16x32.f16 %zero, %arg1, %zero, %arg1, %zero_i16, %arg12, %zero, %zero : (i1, vector<16xf16>, i1, vector<16xf16>, i16, vector<32xf32>, i1, i1) -> vector<32xf32>
+
+ // bf16 -> f32
+ // CHECK: call <32 x float> @llvm.amdgcn.wmma.f32.16x16x32.bf16.v32f32.v16bf16(i1 {{.*}}, <16 x bfloat> %{{.*}}, i1 {{.*}}, <16 x bfloat> %{{.*}}, i16 0, <32 x float> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r3.gfx1250 = rocdl.wmma.f32.16x16x32.bf16 %zero, %arg16, %zero, %arg16, %zero_i16, %arg12, %zero, %zero : (i1, vector<16xbf16>, i1, vector<16xbf16>, i16, vector<32xf32>, i1, i1) -> vector<32xf32>
+
+ // f16 -> f16
+ // CHECK: call <32 x half> @llvm.amdgcn.wmma.f16.16x16x32.f16.v32f16.v16f16(i1 {{.*}}, <16 x half> %{{.*}}, i1 {{.*}}, <16 x half> %{{.*}}, i16 0, <32 x half> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r4.gfx1250 = rocdl.wmma.f16.16x16x32.f16 %zero, %arg1, %zero, %arg1, %zero_i16, %arg9, %zero, %zero : (i1, vector<16xf16>, i1, vector<16xf16>, i16, vector<32xf16>, i1, i1) -> vector<32xf16>
+
+ // bf16 -> bf16
+ // CHECK: call <32 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x32.bf16.v32bf16.v16bf16(i1 {{.*}}, <16 x bfloat> %{{.*}}, i1 {{.*}}, <16 x bfloat> %{{.*}}, i16 0, <32 x bfloat> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r5.gfx1250 = rocdl.wmma.bf16.16x16x32.bf16 %zero, %arg16, %zero, %arg16, %zero_i16, %arg17, %zero, %zero : (i1, vector<16xbf16>, i1, vector<16xbf16>, i16, vector<32xbf16>, i1, i1) -> vector<32xbf16>
+
+ // bf16 -> bf16 / f32
+ // CHECK: call <32 x bfloat> @llvm.amdgcn.wmma.bf16f32.16x16x32.bf16.v32bf16.v16bf16.v32f32(i1 {{.*}}, <16 x bfloat> %{{.*}}, i1 {{.*}}, <16 x bfloat> %{{.*}}, i16 0, <32 x float> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r6.gfx1250 = rocdl.wmma.bf16f32.16x16x32.bf16 %zero, %arg16, %zero, %arg16, %zero_i16, %arg12, %zero, %zero : (i1, vector<16xbf16>, i1, vector<16xbf16>, i16, vector<32xf32>, i1, i1) -> vector<32xbf16>
+
+ // f8/bf8 -> f16/f32
+ // CHECK: call <64 x float> @llvm.amdgcn.wmma.f32.16x16x64.fp8.fp8.v64f32.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x float> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r7.gfx1250 = rocdl.wmma.f32.16x16x64.fp8_fp8 %arg5, %arg5, %zero_i16, %arg13, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf32>, i1, i1) -> vector<64xf32>
+
+ // CHECK: call <64 x float> @llvm.amdgcn.wmma.f32.16x16x64.fp8.bf8.v64f32.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x float> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r8.gfx1250 = rocdl.wmma.f32.16x16x64.fp8_bf8 %arg5, %arg5, %zero_i16, %arg13, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf32>, i1, i1) -> vector<64xf32>
+
+ // CHECK: call <64 x float> @llvm.amdgcn.wmma.f32.16x16x64.bf8.fp8.v64f32.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x float> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r9.gfx1250 = rocdl.wmma.f32.16x16x64.bf8_fp8 %arg5, %arg5, %zero_i16, %arg13, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf32>, i1, i1) -> vector<64xf32>
+
+ // CHECK: call <64 x float> @llvm.amdgcn.wmma.f32.16x16x64.bf8.bf8.v64f32.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x float> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r10.gfx1250 = rocdl.wmma.f32.16x16x64.bf8_bf8 %arg5, %arg5, %zero_i16, %arg13, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf32>, i1, i1) -> vector<64xf32>
+
+ // CHECK: call <64 x half> @llvm.amdgcn.wmma.f16.16x16x64.fp8.fp8.v64f16.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x half> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r11.gfx1250 = rocdl.wmma.f16.16x16x64.fp8_fp8 %arg5, %arg5, %zero_i16, %arg15, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf16>, i1, i1) -> vector<64xf16>
+
+ // CHECK: call <64 x half> @llvm.amdgcn.wmma.f16.16x16x64.fp8.bf8.v64f16.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x half> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r12.gfx1250 = rocdl.wmma.f16.16x16x64.fp8_bf8 %arg5, %arg5, %zero_i16, %arg15, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf16>, i1, i1) -> vector<64xf16>
+
+ // CHECK: call <64 x half> @llvm.amdgcn.wmma.f16.16x16x64.bf8.fp8.v64f16.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x half> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r13.gfx1250 = rocdl.wmma.f16.16x16x64.bf8_fp8 %arg5, %arg5, %zero_i16, %arg15, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf16>, i1, i1) -> vector<64xf16>
+
+ // CHECK: call <64 x half> @llvm.amdgcn.wmma.f16.16x16x64.bf8.bf8.v64f16.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x half> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r14.gfx1250 = rocdl.wmma.f16.16x16x64.bf8_bf8 %arg5, %arg5, %zero_i16, %arg15, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf16>, i1, i1) -> vector<64xf16>
+
+ // CHECK: call <64 x float> @llvm.amdgcn.wmma.f32.16x16x128.fp8.fp8.v64f32.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x float> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r15.gfx1250 = rocdl.wmma.f32.16x16x128.fp8_fp8 %arg5, %arg5, %zero_i16, %arg13, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf32>, i1, i1) -> vector<64xf32>
+
+ // CHECK: call <64 x float> @llvm.amdgcn.wmma.f32.16x16x128.fp8.bf8.v64f32.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x float> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r16.gfx1250 = rocdl.wmma.f32.16x16x128.fp8_bf8 %arg5, %arg5, %zero_i16, %arg13, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf32>, i1, i1) -> vector<64xf32>
+
+ // CHECK: call <64 x float> @llvm.amdgcn.wmma.f32.16x16x128.bf8.fp8.v64f32.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x float> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r17.gfx1250 = rocdl.wmma.f32.16x16x128.bf8_fp8 %arg5, %arg5, %zero_i16, %arg13, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf32>, i1, i1) -> vector<64xf32>
+
+ // CHECK: call <64 x float> @llvm.amdgcn.wmma.f32.16x16x128.bf8.bf8.v64f32.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x float> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r18.gfx1250 = rocdl.wmma.f32.16x16x128.bf8_bf8 %arg5, %arg5, %zero_i16, %arg13, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf32>, i1, i1) -> vector<64xf32>
+
+ // CHECK: call <64 x half> @llvm.amdgcn.wmma.f16.16x16x128.fp8.fp8.v64f16.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x half> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r19.gfx1250 = rocdl.wmma.f16.16x16x128.fp8_fp8 %arg5, %arg5, %zero_i16, %arg15, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf16>, i1, i1) -> vector<64xf16>
+
+ // CHECK: call <64 x half> @llvm.amdgcn.wmma.f16.16x16x128.fp8.bf8.v64f16.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x half> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r20.gfx1250 = rocdl.wmma.f16.16x16x128.fp8_bf8 %arg5, %arg5, %zero_i16, %arg15, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf16>, i1, i1) -> vector<64xf16>
+
+ // CHECK: call <64 x half> @llvm.amdgcn.wmma.f16.16x16x128.bf8.fp8.v64f16.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x half> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r21.gfx1250 = rocdl.wmma.f16.16x16x128.bf8_fp8 %arg5, %arg5, %zero_i16, %arg15, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf16>, i1, i1) -> vector<64xf16>
+
+ // CHECK: call <64 x half> @llvm.amdgcn.wmma.f16.16x16x128.bf8.bf8.v64f16.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x half> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r22.gfx1250 = rocdl.wmma.f16.16x16x128.bf8_bf8 %arg5, %arg5, %zero_i16, %arg15, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf16>, i1, i1) -> vector<64xf16>
+
+ // iu8 -> i32
+ // CHECK: call <64 x i32> @llvm.amdgcn.wmma.i32.16x16x64.iu8.v64i32.v4i32(i1 {{.*}}, <4 x i32> %{{.*}}, i1 {{.*}}, <4 x i32> %{{.*}}, <64 x i32> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r23.gfx1250 = rocdl.wmma.i32.16x16x64.iu8 %zero, %arg5, %zero, %arg5, %arg14, %zero, %zero : (i1, vector<4xi32>, i1, vector<4xi32>, vector<64xi32>, i1, i1) -> vector<64xi32>
+
// ---- Wave64 -----
// f16 -> f32
@@ -1477,6 +1556,52 @@ llvm.func @rocdl.cvt.scale.pk16(%v3xi32: vector<3xi32>, %scale:i32) {
llvm.return
}
+// CHECK-LABEL: rocdl.cvt.scalef32.pk16
+// CHECK-SAME:(<16 x float> %[[V16F32:.+]], <16 x half> %[[V16F16:.+]], <16 x bfloat> %[[V16BF16:.+]], float %[[SCALE:.+]])
+llvm.func @rocdl.cvt.scalef32.pk16(%v16xf32: vector<16xf32>, %v16xf16: vector<16xf16>, %v16xbf16: vector<16xbf16>, %scale: f32) {
+
+ // CHECK: call <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.fp6.f16(<16 x half> %[[V16F16]], float %[[SCALE]])
+ %0 = rocdl.cvt.scalef32.pk16.fp6.f16 %v16xf16, %scale : vector<3xi32>
+ // CHECK: call <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.fp6.bf16(<16 x bfloat> %[[V16BF16]], float %[[SCALE]])
+ %1 = rocdl.cvt.scalef32.pk16.fp6.bf16 %v16xbf16, %scale : vector<3xi32>
+ // CHECK: call <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.fp6.f32(<16 x float> %[[V16F32]], float %[[SCALE]])
+ %2 = rocdl.cvt.scalef32.pk16.fp6.f32 %v16xf32, %scale : vector<3xi32>
+
+ // CHECK: call <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.bf6.f16(<16 x half> %[[V16F16]], float %[[SCALE]])
+ %3 = rocdl.cvt.scalef32.pk16.bf6.f16 %v16xf16, %scale : vector<3xi32>
+ // CHECK: call <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.bf6.bf16(<16 x bfloat> %[[V16BF16]], float %[[SCALE]])
+ %4 = rocdl.cvt.scalef32.pk16.bf6.bf16 %v16xbf16, %scale : vector<3xi32>
+ // CHECK: call <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.bf6.f32(<16 x float> %[[V16F32]], float %[[SCALE]])
+ %5 = rocdl.cvt.scalef32.pk16.bf6.f32 %v16xf32, %scale : vector<3xi32>
+
+ llvm.return
+}
+
+// CHECK-LABEL: rocdl.cvt.scalef32.sr.pk16
+// CHECK-SAME:(<16 x float> %[[V16F32:.+]], <16 x half> %[[V16F16:.+]], <16 x bfloat> %[[V16BF16:.+]], i32 %[[SEED:.+]], float %[[SCALE:.+]])
+llvm.func @rocdl.cvt.scalef32.sr.pk16(%v16xf32: vector<16xf32>,
+ %v16xf16: vector<16xf16>,
+ %v16xbf16: vector<16xbf16>,
+ %seed: i32,
+ %scale: f32) {
+
+ // CHECK: call <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.fp6.f16(<16 x half> %[[V16F16]], i32 %[[SEED]], float %[[SCALE]])
+ %0 = rocdl.cvt.scalef32.sr.pk16.fp6.f16 %v16xf16, %seed, %scale : vector<3xi32>
+ // CHECK: call <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.fp6.bf16(<16 x bfloat> %[[V16BF16]], i32 %[[SEED]], float %[[SCALE]])
+ %1 = rocdl.cvt.scalef32.sr.pk16.fp6.bf16 %v16xbf16, %seed, %scale : vector<3xi32>
+ // CHECK: call <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.fp6.f32(<16 x float> %[[V16F32]], i32 %[[SEED]], float %[[SCALE]])
+ %2 = rocdl.cvt.scalef32.sr.pk16.fp6.f32 %v16xf32, %seed, %scale : vector<3xi32>
+
+ // CHECK: call <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.bf6.f16(<16 x half> %[[V16F16]], i32 %[[SEED]], float %[[SCALE]])
+ %3 = rocdl.cvt.scalef32.sr.pk16.bf6.f16 %v16xf16, %seed, %scale : vector<3xi32>
+ // CHECK: call <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.bf6.bf16(<16 x bfloat> %[[V16BF16]], i32 %[[SEED]], float %[[SCALE]])
+ %4 = rocdl.cvt.scalef32.sr.pk16.bf6.bf16 %v16xbf16, %seed, %scale : vector<3xi32>
+ // CHECK: call <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.bf6.f32(<16 x float> %[[V16F32]], i32 %[[SEED]], float %[[SCALE]])
+ %5 = rocdl.cvt.scalef32.sr.pk16.bf6.f32 %v16xf32, %seed, %scale : vector<3xi32>
+
+ llvm.return
+}
+
// CHECK-DAG: attributes #[[$KERNEL_ATTRS]] = { "amdgpu-flat-work-group-size"="1,256" "uniform-work-group-size"="true" }
// CHECK-DAG: attributes #[[$KERNEL_WORKGROUP_ATTRS]] = { "amdgpu-flat-work-group-size"="1,1024"
// CHECK-DAG: attributes #[[$KNOWN_BLOCK_SIZE_ATTRS]] = { "amdgpu-flat-work-group-size"="128,128"
diff --git a/mlir/test/Target/Wasm/abs.mlir b/mlir/test/Target/Wasm/abs.mlir
index 9c45ba7..fe3602a 100644
--- a/mlir/test/Target/Wasm/abs.mlir
+++ b/mlir/test/Target/Wasm/abs.mlir
@@ -12,12 +12,12 @@
)
*/
-// CHECK-LABEL: wasmssa.func @abs_f32() -> f32 {
+// CHECK-LABEL: wasmssa.func exported @abs_f32() -> f32 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 1.000000e+01 : f32
// CHECK: %[[VAL_1:.*]] = wasmssa.abs %[[VAL_0]] : f32
// CHECK: wasmssa.return %[[VAL_1]] : f32
-// CHECK-LABEL: wasmssa.func @abs_f64() -> f64 {
+// CHECK-LABEL: wasmssa.func exported @abs_f64() -> f64 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 1.000000e+01 : f64
// CHECK: %[[VAL_1:.*]] = wasmssa.abs %[[VAL_0]] : f64
// CHECK: wasmssa.return %[[VAL_1]] : f64
diff --git a/mlir/test/Target/Wasm/add_div.mlir b/mlir/test/Target/Wasm/add_div.mlir
new file mode 100644
index 0000000..8a87c60
--- /dev/null
+++ b/mlir/test/Target/Wasm/add_div.mlir
@@ -0,0 +1,40 @@
+// RUN: yaml2obj %S/inputs/add_div.yaml.wasm -o - | mlir-translate --import-wasm | FileCheck %s
+
+/* Source code used to create this test:
+ (module $test.wasm
+ (type (;0;) (func (param i32) (result i32)))
+ (type (;1;) (func (param i32 i32) (result i32)))
+ (import "env" "twoTimes" (func $twoTimes (type 0)))
+ (func $add (type 1) (param i32 i32) (result i32)
+ local.get 0
+ call $twoTimes
+ local.get 1
+ call $twoTimes
+ i32.add
+ i32.const 2
+ i32.div_s)
+ (memory (;0;) 2)
+ (global $__stack_pointer (mut i32) (i32.const 66560))
+ (export "memory" (memory 0))
+ (export "add" (func $add)))
+*/
+
+// CHECK-LABEL: wasmssa.import_func "twoTimes" from "env" as @func_0 {type = (i32) -> i32}
+
+// CHECK-LABEL: wasmssa.func exported @add(
+// CHECK-SAME: %[[ARG0:.*]]: !wasmssa<local ref to i32>,
+// CHECK-SAME: %[[ARG1:.*]]: !wasmssa<local ref to i32>) -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.local_get %[[ARG0]] : ref to i32
+// CHECK: %[[VAL_1:.*]] = wasmssa.call @func_0(%[[VAL_0]]) : (i32) -> i32
+// CHECK: %[[VAL_2:.*]] = wasmssa.local_get %[[ARG1]] : ref to i32
+// CHECK: %[[VAL_3:.*]] = wasmssa.call @func_0(%[[VAL_2]]) : (i32) -> i32
+// CHECK: %[[VAL_4:.*]] = wasmssa.add %[[VAL_1]] %[[VAL_3]] : i32
+// CHECK: %[[VAL_5:.*]] = wasmssa.const 2 : i32
+// CHECK: %[[VAL_6:.*]] = wasmssa.div_si %[[VAL_4]] %[[VAL_5]] : i32
+// CHECK: wasmssa.return %[[VAL_6]] : i32
+// CHECK: }
+// CHECK: wasmssa.memory exported @memory !wasmssa<limit[2:]>
+
+// CHECK-LABEL: wasmssa.global @global_0 i32 mutable : {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 66560 : i32
+// CHECK: wasmssa.return %[[VAL_0]] : i32
diff --git a/mlir/test/Target/Wasm/and.mlir b/mlir/test/Target/Wasm/and.mlir
index 4c0fea0..323d41a 100644
--- a/mlir/test/Target/Wasm/and.mlir
+++ b/mlir/test/Target/Wasm/and.mlir
@@ -14,13 +14,13 @@
)
*/
-// CHECK-LABEL: wasmssa.func @and_i32() -> i32 {
+// CHECK-LABEL: wasmssa.func exported @and_i32() -> i32 {
// CHECK: %0 = wasmssa.const 10 : i32
// CHECK: %1 = wasmssa.const 3 : i32
// CHECK: %2 = wasmssa.and %0 %1 : i32
// CHECK: wasmssa.return %2 : i32
-// CHECK-LABEL: wasmssa.func @and_i64() -> i64 {
+// CHECK-LABEL: wasmssa.func exported @and_i64() -> i64 {
// CHECK: %0 = wasmssa.const 10 : i64
// CHECK: %1 = wasmssa.const 3 : i64
// CHECK: %2 = wasmssa.and %0 %1 : i64
diff --git a/mlir/test/Target/Wasm/block.mlir b/mlir/test/Target/Wasm/block.mlir
new file mode 100644
index 0000000..c85fc1e
--- /dev/null
+++ b/mlir/test/Target/Wasm/block.mlir
@@ -0,0 +1,16 @@
+// RUN: yaml2obj %S/inputs/block.yaml.wasm -o - | mlir-translate --import-wasm | FileCheck %s
+
+/* Source code used to create this test:
+(module
+(func(export "i_am_a_block")
+(block $i_am_a_block)
+)
+)
+*/
+
+// CHECK-LABEL: wasmssa.func exported @i_am_a_block() {
+// CHECK: wasmssa.block : {
+// CHECK: wasmssa.block_return
+// CHECK: }> ^bb1
+// CHECK: ^bb1:
+// CHECK: wasmssa.return
diff --git a/mlir/test/Target/Wasm/block_complete_type.mlir b/mlir/test/Target/Wasm/block_complete_type.mlir
new file mode 100644
index 0000000..67df198
--- /dev/null
+++ b/mlir/test/Target/Wasm/block_complete_type.mlir
@@ -0,0 +1,24 @@
+// RUN: yaml2obj %S/inputs/block_complete_type.yaml.wasm -o - | mlir-translate --import-wasm | FileCheck %s
+
+/* Source code used to create this test:
+(module
+ (type (;0;) (func (param i32) (result i32)))
+ (type (;1;) (func (result i32)))
+ (func (;0;) (type 1) (result i32)
+ i32.const 14
+ block (param i32) (result i32) ;; label = @1
+ i32.const 1
+ i32.add
+ end))
+*/
+
+// CHECK-LABEL: wasmssa.func @func_0() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 14 : i32
+// CHECK: wasmssa.block(%[[VAL_0]]) : i32 : {
+// CHECK: ^bb0(%[[VAL_1:.*]]: i32):
+// CHECK: %[[VAL_2:.*]] = wasmssa.const 1 : i32
+// CHECK: %[[VAL_3:.*]] = wasmssa.add %[[VAL_1]] %[[VAL_2]] : i32
+// CHECK: wasmssa.block_return %[[VAL_3]] : i32
+// CHECK: }> ^bb1
+// CHECK: ^bb1(%[[VAL_4:.*]]: i32):
+// CHECK: wasmssa.return %[[VAL_4]] : i32
diff --git a/mlir/test/Target/Wasm/block_value_type.mlir b/mlir/test/Target/Wasm/block_value_type.mlir
new file mode 100644
index 0000000..fa30f08
--- /dev/null
+++ b/mlir/test/Target/Wasm/block_value_type.mlir
@@ -0,0 +1,19 @@
+// RUN: yaml2obj %S/inputs/block_value_type.yaml.wasm -o - | mlir-translate --import-wasm | FileCheck %s
+
+/* Source code used to create this test:
+(module
+ (type (;0;) (func (result i32)))
+ (func (;0;) (type 0) (result i32)
+ block (result i32) ;; label = @1
+ i32.const 17
+ end))
+*/
+
+
+// CHECK-LABEL: wasmssa.func @func_0() -> i32 {
+// CHECK: wasmssa.block : {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 17 : i32
+// CHECK: wasmssa.block_return %[[VAL_0]] : i32
+// CHECK: }> ^bb1
+// CHECK: ^bb1(%[[VAL_1:.*]]: i32):
+// CHECK: wasmssa.return %[[VAL_1]] : i32
diff --git a/mlir/test/Target/Wasm/branch_if.mlir b/mlir/test/Target/Wasm/branch_if.mlir
new file mode 100644
index 0000000..c91ff37
--- /dev/null
+++ b/mlir/test/Target/Wasm/branch_if.mlir
@@ -0,0 +1,29 @@
+// RUN: yaml2obj %S/inputs/branch_if.yaml.wasm -o - | mlir-translate --import-wasm | FileCheck %s
+
+/* Source code used to create this test:
+(module
+ (type $produce_i32 (func (result i32)))
+ (func (type $produce_i32)
+ (block $my_block (type $produce_i32)
+ i32.const 1
+ i32.const 2
+ br_if $my_block
+ i32.const 1
+ i32.add
+ )
+ )
+)
+*/
+
+// CHECK-LABEL: wasmssa.func @func_0() -> i32 {
+// CHECK: wasmssa.block : {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 1 : i32
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 2 : i32
+// CHECK: wasmssa.branch_if %[[VAL_1]] to level 0 with args(%[[VAL_0]] : i32) else ^bb1
+// CHECK: ^bb1:
+// CHECK: %[[VAL_2:.*]] = wasmssa.const 1 : i32
+// CHECK: %[[VAL_3:.*]] = wasmssa.add %[[VAL_0]] %[[VAL_2]] : i32
+// CHECK: wasmssa.block_return %[[VAL_3]] : i32
+// CHECK: }> ^bb1
+// CHECK: ^bb1(%[[VAL_4:.*]]: i32):
+// CHECK: wasmssa.return %[[VAL_4]] : i32
diff --git a/mlir/test/Target/Wasm/call.mlir b/mlir/test/Target/Wasm/call.mlir
new file mode 100644
index 0000000..c0169aa
--- /dev/null
+++ b/mlir/test/Target/Wasm/call.mlir
@@ -0,0 +1,17 @@
+// RUN: yaml2obj %S/inputs/call.yaml.wasm -o - | mlir-translate --import-wasm | FileCheck %s
+
+/* Source code used to create this test:
+(module
+(func $forty_two (result i32)
+i32.const 42)
+(func(export "forty_two")(result i32)
+call $forty_two))
+*/
+
+// CHECK-LABEL: wasmssa.func @func_0() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 42 : i32
+// CHECK: wasmssa.return %[[VAL_0]] : i32
+
+// CHECK-LABEL: wasmssa.func exported @forty_two() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.call @func_0 : () -> i32
+// CHECK: wasmssa.return %[[VAL_0]] : i32
diff --git a/mlir/test/Target/Wasm/clz.mlir b/mlir/test/Target/Wasm/clz.mlir
index 3e6641d..858c09d 100644
--- a/mlir/test/Target/Wasm/clz.mlir
+++ b/mlir/test/Target/Wasm/clz.mlir
@@ -14,12 +14,12 @@
)
*/
-// CHECK-LABEL: wasmssa.func @clz_i32() -> i32 {
+// CHECK-LABEL: wasmssa.func exported @clz_i32() -> i32 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i32
// CHECK: %[[VAL_1:.*]] = wasmssa.clz %[[VAL_0]] : i32
// CHECK: wasmssa.return %[[VAL_1]] : i32
-// CHECK-LABEL: wasmssa.func @clz_i64() -> i64 {
+// CHECK-LABEL: wasmssa.func exported @clz_i64() -> i64 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i64
// CHECK: %[[VAL_1:.*]] = wasmssa.clz %[[VAL_0]] : i64
// CHECK: wasmssa.return %[[VAL_1]] : i64
diff --git a/mlir/test/Target/Wasm/comparison_ops.mlir b/mlir/test/Target/Wasm/comparison_ops.mlir
new file mode 100644
index 0000000..91e3a6a
--- /dev/null
+++ b/mlir/test/Target/Wasm/comparison_ops.mlir
@@ -0,0 +1,269 @@
+// RUN: yaml2obj %S/inputs/comparison_ops.yaml.wasm -o - | mlir-translate --import-wasm | FileCheck %s
+/* Source code used to create this test:
+(module
+ (func $lt_si32 (result i32)
+ i32.const 12
+ i32.const 50
+ i32.lt_s
+ )
+ (func $le_si32 (result i32)
+ i32.const 12
+ i32.const 50
+ i32.le_s
+ )
+ (func $lt_ui32 (result i32)
+ i32.const 12
+ i32.const 50
+ i32.lt_u
+ )
+ (func $le_ui32 (result i32)
+ i32.const 12
+ i32.const 50
+ i32.le_u
+ )
+ (func $gt_si32 (result i32)
+ i32.const 12
+ i32.const 50
+ i32.gt_s
+ )
+ (func $gt_ui32 (result i32)
+ i32.const 12
+ i32.const 50
+ i32.gt_u
+ )
+ (func $ge_si32 (result i32)
+ i32.const 12
+ i32.const 50
+ i32.ge_s
+ )
+ (func $ge_ui32 (result i32)
+ i32.const 12
+ i32.const 50
+ i32.ge_u
+ )
+ (func $lt_si64 (result i32)
+ i64.const 12
+ i64.const 50
+ i64.lt_s
+ )
+ (func $le_si64 (result i32)
+ i64.const 12
+ i64.const 50
+ i64.le_s
+ )
+ (func $lt_ui64 (result i32)
+ i64.const 12
+ i64.const 50
+ i64.lt_u
+ )
+ (func $le_ui64 (result i32)
+ i64.const 12
+ i64.const 50
+ i64.le_u
+ )
+ (func $gt_si64 (result i32)
+ i64.const 12
+ i64.const 50
+ i64.gt_s
+ )
+ (func $gt_ui64 (result i32)
+ i64.const 12
+ i64.const 50
+ i64.gt_u
+ )
+ (func $ge_si64 (result i32)
+ i64.const 12
+ i64.const 50
+ i64.ge_s
+ )
+ (func $ge_ui64 (result i32)
+ i64.const 12
+ i64.const 50
+ i64.ge_u
+ )
+ (func $lt_f32 (result i32)
+ f32.const 5
+ f32.const 14
+ f32.lt
+ )
+ (func $le_f32 (result i32)
+ f32.const 5
+ f32.const 14
+ f32.le
+ )
+ (func $gt_f32 (result i32)
+ f32.const 5
+ f32.const 14
+ f32.gt
+ )
+ (func $ge_f32 (result i32)
+ f32.const 5
+ f32.const 14
+ f32.ge
+ )
+ (func $lt_f64 (result i32)
+ f64.const 5
+ f64.const 14
+ f64.lt
+ )
+ (func $le_f64 (result i32)
+ f64.const 5
+ f64.const 14
+ f64.le
+ )
+ (func $gt_f64 (result i32)
+ f64.const 5
+ f64.const 14
+ f64.gt
+ )
+ (func $ge_f64 (result i32)
+ f64.const 5
+ f64.const 14
+ f64.ge
+ )
+)
+*/
+
+// CHECK-LABEL: wasmssa.func @func_0() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 12 : i32
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 50 : i32
+// CHECK: %[[VAL_2:.*]] = wasmssa.lt_si %[[VAL_0]] %[[VAL_1]] : i32 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_1() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 12 : i32
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 50 : i32
+// CHECK: %[[VAL_2:.*]] = wasmssa.le_si %[[VAL_0]] %[[VAL_1]] : i32 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_2() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 12 : i32
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 50 : i32
+// CHECK: %[[VAL_2:.*]] = wasmssa.lt_ui %[[VAL_0]] %[[VAL_1]] : i32 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_3() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 12 : i32
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 50 : i32
+// CHECK: %[[VAL_2:.*]] = wasmssa.le_ui %[[VAL_0]] %[[VAL_1]] : i32 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_4() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 12 : i32
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 50 : i32
+// CHECK: %[[VAL_2:.*]] = wasmssa.gt_si %[[VAL_0]] %[[VAL_1]] : i32 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_5() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 12 : i32
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 50 : i32
+// CHECK: %[[VAL_2:.*]] = wasmssa.gt_ui %[[VAL_0]] %[[VAL_1]] : i32 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_6() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 12 : i32
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 50 : i32
+// CHECK: %[[VAL_2:.*]] = wasmssa.ge_si %[[VAL_0]] %[[VAL_1]] : i32 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_7() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 12 : i32
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 50 : i32
+// CHECK: %[[VAL_2:.*]] = wasmssa.ge_ui %[[VAL_0]] %[[VAL_1]] : i32 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_8() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 12 : i64
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 50 : i64
+// CHECK: %[[VAL_2:.*]] = wasmssa.lt_si %[[VAL_0]] %[[VAL_1]] : i64 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_9() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 12 : i64
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 50 : i64
+// CHECK: %[[VAL_2:.*]] = wasmssa.le_si %[[VAL_0]] %[[VAL_1]] : i64 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_10() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 12 : i64
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 50 : i64
+// CHECK: %[[VAL_2:.*]] = wasmssa.lt_ui %[[VAL_0]] %[[VAL_1]] : i64 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_11() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 12 : i64
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 50 : i64
+// CHECK: %[[VAL_2:.*]] = wasmssa.le_ui %[[VAL_0]] %[[VAL_1]] : i64 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_12() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 12 : i64
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 50 : i64
+// CHECK: %[[VAL_2:.*]] = wasmssa.gt_si %[[VAL_0]] %[[VAL_1]] : i64 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_13() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 12 : i64
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 50 : i64
+// CHECK: %[[VAL_2:.*]] = wasmssa.gt_ui %[[VAL_0]] %[[VAL_1]] : i64 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_14() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 12 : i64
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 50 : i64
+// CHECK: %[[VAL_2:.*]] = wasmssa.ge_si %[[VAL_0]] %[[VAL_1]] : i64 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_15() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 12 : i64
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 50 : i64
+// CHECK: %[[VAL_2:.*]] = wasmssa.ge_ui %[[VAL_0]] %[[VAL_1]] : i64 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_16() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 5.000000e+00 : f32
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 1.400000e+01 : f32
+// CHECK: %[[VAL_2:.*]] = wasmssa.lt %[[VAL_0]] %[[VAL_1]] : f32 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_17() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 5.000000e+00 : f32
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 1.400000e+01 : f32
+// CHECK: %[[VAL_2:.*]] = wasmssa.le %[[VAL_0]] %[[VAL_1]] : f32 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_18() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 5.000000e+00 : f32
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 1.400000e+01 : f32
+// CHECK: %[[VAL_2:.*]] = wasmssa.gt %[[VAL_0]] %[[VAL_1]] : f32 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_19() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 5.000000e+00 : f32
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 1.400000e+01 : f32
+// CHECK: %[[VAL_2:.*]] = wasmssa.ge %[[VAL_0]] %[[VAL_1]] : f32 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_20() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 5.000000e+00 : f64
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 1.400000e+01 : f64
+// CHECK: %[[VAL_2:.*]] = wasmssa.lt %[[VAL_0]] %[[VAL_1]] : f64 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_21() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 5.000000e+00 : f64
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 1.400000e+01 : f64
+// CHECK: %[[VAL_2:.*]] = wasmssa.le %[[VAL_0]] %[[VAL_1]] : f64 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_22() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 5.000000e+00 : f64
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 1.400000e+01 : f64
+// CHECK: %[[VAL_2:.*]] = wasmssa.gt %[[VAL_0]] %[[VAL_1]] : f64 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_23() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 5.000000e+00 : f64
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 1.400000e+01 : f64
+// CHECK: %[[VAL_2:.*]] = wasmssa.ge %[[VAL_0]] %[[VAL_1]] : f64 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
diff --git a/mlir/test/Target/Wasm/const.mlir b/mlir/test/Target/Wasm/const.mlir
index aa9e76f..adb792a 100644
--- a/mlir/test/Target/Wasm/const.mlir
+++ b/mlir/test/Target/Wasm/const.mlir
@@ -16,22 +16,22 @@
)
*/
-// CHECK-LABEL: wasmssa.func nested @func_0() -> i32 {
+// CHECK-LABEL: wasmssa.func @func_0() -> i32 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 1 : i32
// CHECK: wasmssa.return %[[VAL_0]] : i32
// CHECK: }
-// CHECK-LABEL: wasmssa.func nested @func_1() -> i64 {
+// CHECK-LABEL: wasmssa.func @func_1() -> i64 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 3 : i64
// CHECK: wasmssa.return %[[VAL_0]] : i64
// CHECK: }
-// CHECK-LABEL: wasmssa.func nested @func_2() -> f32 {
+// CHECK-LABEL: wasmssa.func @func_2() -> f32 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 4.000000e+00 : f32
// CHECK: wasmssa.return %[[VAL_0]] : f32
// CHECK: }
-// CHECK-LABEL: wasmssa.func nested @func_3() -> f64 {
+// CHECK-LABEL: wasmssa.func @func_3() -> f64 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 9.000000e+00 : f64
// CHECK: wasmssa.return %[[VAL_0]] : f64
// CHECK: }
diff --git a/mlir/test/Target/Wasm/convert.mlir b/mlir/test/Target/Wasm/convert.mlir
new file mode 100644
index 0000000..ddc29a7
--- /dev/null
+++ b/mlir/test/Target/Wasm/convert.mlir
@@ -0,0 +1,85 @@
+// RUN: yaml2obj %S/inputs/convert.yaml.wasm -o - | mlir-translate --import-wasm | FileCheck %s
+
+/* Source code used to generate this test:
+(module
+ (func (export "convert_i32_u_to_f32") (result f32)
+ i32.const 10
+ f32.convert_i32_u
+ )
+
+ (func (export "convert_i32_s_to_f32") (result f32)
+ i32.const 42
+ f32.convert_i32_s
+ )
+
+ (func (export "convert_i64_u_to_f32") (result f32)
+ i64.const 17
+ f32.convert_i64_u
+ )
+
+ (func (export "convert_i64s_to_f32") (result f32)
+ i64.const 10
+ f32.convert_i64_s
+ )
+
+ (func (export "convert_i32_u_to_f64") (result f64)
+ i32.const 10
+ f64.convert_i32_u
+ )
+
+ (func (export "convert_i32_s_to_f64") (result f64)
+ i32.const 42
+ f64.convert_i32_s
+ )
+
+ (func (export "convert_i64_u_to_f64") (result f64)
+ i64.const 17
+ f64.convert_i64_u
+ )
+
+ (func (export "convert_i64s_to_f64") (result f64)
+ i64.const 10
+ f64.convert_i64_s
+ )
+)
+*/
+
+// CHECK-LABEL: wasmssa.func exported @convert_i32_u_to_f32() -> f32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i32
+// CHECK: %[[VAL_1:.*]] = wasmssa.convert_u %[[VAL_0]] : i32 to f32
+// CHECK: wasmssa.return %[[VAL_1]] : f32
+
+// CHECK-LABEL: wasmssa.func exported @convert_i32_s_to_f32() -> f32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 42 : i32
+// CHECK: %[[VAL_1:.*]] = wasmssa.convert_s %[[VAL_0]] : i32 to f32
+// CHECK: wasmssa.return %[[VAL_1]] : f32
+
+// CHECK-LABEL: wasmssa.func exported @convert_i64_u_to_f32() -> f32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 17 : i64
+// CHECK: %[[VAL_1:.*]] = wasmssa.convert_u %[[VAL_0]] : i64 to f32
+// CHECK: wasmssa.return %[[VAL_1]] : f32
+
+// CHECK-LABEL: wasmssa.func exported @convert_i64s_to_f32() -> f32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i64
+// CHECK: %[[VAL_1:.*]] = wasmssa.convert_s %[[VAL_0]] : i64 to f32
+// CHECK: wasmssa.return %[[VAL_1]] : f32
+
+// CHECK-LABEL: wasmssa.func exported @convert_i32_u_to_f64() -> f64 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i32
+// CHECK: %[[VAL_1:.*]] = wasmssa.convert_u %[[VAL_0]] : i32 to f64
+// CHECK: wasmssa.return %[[VAL_1]] : f64
+
+// CHECK-LABEL: wasmssa.func exported @convert_i32_s_to_f64() -> f64 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 42 : i32
+// CHECK: %[[VAL_1:.*]] = wasmssa.convert_s %[[VAL_0]] : i32 to f64
+// CHECK: wasmssa.return %[[VAL_1]] : f64
+
+// CHECK-LABEL: wasmssa.func exported @convert_i64_u_to_f64() -> f64 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 17 : i64
+// CHECK: %[[VAL_1:.*]] = wasmssa.convert_u %[[VAL_0]] : i64 to f64
+// CHECK: wasmssa.return %[[VAL_1]] : f64
+
+// CHECK-LABEL: wasmssa.func exported @convert_i64s_to_f64() -> f64 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i64
+// CHECK: %[[VAL_1:.*]] = wasmssa.convert_s %[[VAL_0]] : i64 to f64
+// CHECK: wasmssa.return %[[VAL_1]] : f64
diff --git a/mlir/test/Target/Wasm/copysign.mlir b/mlir/test/Target/Wasm/copysign.mlir
index 33d7a56..90c5b11 100644
--- a/mlir/test/Target/Wasm/copysign.mlir
+++ b/mlir/test/Target/Wasm/copysign.mlir
@@ -16,14 +16,14 @@
)
*/
-// CHECK-LABEL: wasmssa.func @copysign_f32() -> f32 {
+// CHECK-LABEL: wasmssa.func exported @copysign_f32() -> f32 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 1.000000e+01 : f32
// CHECK: %[[VAL_1:.*]] = wasmssa.const 1.000000e+00 : f32
// CHECK: %[[VAL_2:.*]] = wasmssa.copysign %[[VAL_0]] %[[VAL_1]] : f32
// CHECK: wasmssa.return %[[VAL_2]] : f32
// CHECK: }
-// CHECK-LABEL: wasmssa.func @copysign_f64() -> f64 {
+// CHECK-LABEL: wasmssa.func exported @copysign_f64() -> f64 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 1.000000e+01 : f64
// CHECK: %[[VAL_1:.*]] = wasmssa.const 1.000000e+00 : f64
// CHECK: %[[VAL_2:.*]] = wasmssa.copysign %[[VAL_0]] %[[VAL_1]] : f64
diff --git a/mlir/test/Target/Wasm/ctz.mlir b/mlir/test/Target/Wasm/ctz.mlir
index 6c0806f..9e7cc5e 100644
--- a/mlir/test/Target/Wasm/ctz.mlir
+++ b/mlir/test/Target/Wasm/ctz.mlir
@@ -14,12 +14,12 @@
)
*/
-// CHECK-LABEL: wasmssa.func @ctz_i32() -> i32 {
+// CHECK-LABEL: wasmssa.func exported @ctz_i32() -> i32 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i32
// CHECK: %[[VAL_1:.*]] = wasmssa.ctz %[[VAL_0]] : i32
// CHECK: wasmssa.return %[[VAL_1]] : i32
-// CHECK-LABEL: wasmssa.func @ctz_i64() -> i64 {
+// CHECK-LABEL: wasmssa.func exported @ctz_i64() -> i64 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i64
// CHECK: %[[VAL_1:.*]] = wasmssa.ctz %[[VAL_0]] : i64
// CHECK: wasmssa.return %[[VAL_1]] : i64
diff --git a/mlir/test/Target/Wasm/demote.mlir b/mlir/test/Target/Wasm/demote.mlir
new file mode 100644
index 0000000..3d2bc05
--- /dev/null
+++ b/mlir/test/Target/Wasm/demote.mlir
@@ -0,0 +1,15 @@
+// RUN: yaml2obj %S/inputs/demote.yaml.wasm -o - | mlir-translate --import-wasm | FileCheck %s
+
+/* Source code used to create this test:
+(module
+ (func $main (result f32)
+ f64.const 2.24
+ f32.demote_f64
+ )
+)
+*/
+
+// CHECK-LABEL: wasmssa.func @func_0() -> f32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 2.240000e+00 : f64
+// CHECK: %[[VAL_1:.*]] = wasmssa.demote %[[VAL_0]] : f64 to f32
+// CHECK: wasmssa.return %[[VAL_1]] : f32
diff --git a/mlir/test/Target/Wasm/div.mlir b/mlir/test/Target/Wasm/div.mlir
index c91f780..4967d96 100644
--- a/mlir/test/Target/Wasm/div.mlir
+++ b/mlir/test/Target/Wasm/div.mlir
@@ -66,61 +66,61 @@
)
*/
-// CHECK-LABEL: wasmssa.func @div_u_i32() -> i32 {
+// CHECK-LABEL: wasmssa.func exported @div_u_i32() -> i32 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i32
// CHECK: %[[VAL_1:.*]] = wasmssa.const 2 : i32
// CHECK: %[[VAL_2:.*]] = wasmssa.div_ui %[[VAL_0]] %[[VAL_1]] : i32
// CHECK: wasmssa.return %[[VAL_2]] : i32
-// CHECK-LABEL: wasmssa.func @div_u_i32_zero() -> i32 {
+// CHECK-LABEL: wasmssa.func exported @div_u_i32_zero() -> i32 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i32
// CHECK: %[[VAL_1:.*]] = wasmssa.const 0 : i32
// CHECK: %[[VAL_2:.*]] = wasmssa.div_ui %[[VAL_0]] %[[VAL_1]] : i32
// CHECK: wasmssa.return %[[VAL_2]] : i32
-// CHECK-LABEL: wasmssa.func @div_s_i32() -> i32 {
+// CHECK-LABEL: wasmssa.func exported @div_s_i32() -> i32 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i32
// CHECK: %[[VAL_1:.*]] = wasmssa.const 2 : i32
// CHECK: %[[VAL_2:.*]] = wasmssa.div_si %[[VAL_0]] %[[VAL_1]] : i32
// CHECK: wasmssa.return %[[VAL_2]] : i32
-// CHECK-LABEL: wasmssa.func @div_s_i32_zero() -> i32 {
+// CHECK-LABEL: wasmssa.func exported @div_s_i32_zero() -> i32 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i32
// CHECK: %[[VAL_1:.*]] = wasmssa.const 0 : i32
// CHECK: %[[VAL_2:.*]] = wasmssa.div_si %[[VAL_0]] %[[VAL_1]] : i32
// CHECK: wasmssa.return %[[VAL_2]] : i32
-// CHECK-LABEL: wasmssa.func @div_u_i64() -> i64 {
+// CHECK-LABEL: wasmssa.func exported @div_u_i64() -> i64 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i64
// CHECK: %[[VAL_1:.*]] = wasmssa.const 2 : i64
// CHECK: %[[VAL_2:.*]] = wasmssa.div_ui %[[VAL_0]] %[[VAL_1]] : i64
// CHECK: wasmssa.return %[[VAL_2]] : i64
-// CHECK-LABEL: wasmssa.func @div_u_i64_zero() -> i64 {
+// CHECK-LABEL: wasmssa.func exported @div_u_i64_zero() -> i64 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i64
// CHECK: %[[VAL_1:.*]] = wasmssa.const 0 : i64
// CHECK: %[[VAL_2:.*]] = wasmssa.div_ui %[[VAL_0]] %[[VAL_1]] : i64
// CHECK: wasmssa.return %[[VAL_2]] : i64
-// CHECK-LABEL: wasmssa.func @div_s_i64() -> i64 {
+// CHECK-LABEL: wasmssa.func exported @div_s_i64() -> i64 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i64
// CHECK: %[[VAL_1:.*]] = wasmssa.const 2 : i64
// CHECK: %[[VAL_2:.*]] = wasmssa.div_si %[[VAL_0]] %[[VAL_1]] : i64
// CHECK: wasmssa.return %[[VAL_2]] : i64
-// CHECK-LABEL: wasmssa.func @div_s_i64_zero() -> i64 {
+// CHECK-LABEL: wasmssa.func exported @div_s_i64_zero() -> i64 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i64
// CHECK: %[[VAL_1:.*]] = wasmssa.const 0 : i64
// CHECK: %[[VAL_2:.*]] = wasmssa.div_si %[[VAL_0]] %[[VAL_1]] : i64
// CHECK: wasmssa.return %[[VAL_2]] : i64
-// CHECK-LABEL: wasmssa.func @div_f32() -> f32 {
+// CHECK-LABEL: wasmssa.func exported @div_f32() -> f32 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 1.000000e+01 : f32
// CHECK: %[[VAL_1:.*]] = wasmssa.const 2.000000e+00 : f32
// CHECK: %[[VAL_2:.*]] = wasmssa.div %[[VAL_0]] %[[VAL_1]] : f32
// CHECK: wasmssa.return %[[VAL_2]] : f32
-// CHECK-LABEL: wasmssa.func @div_f64() -> f64 {
+// CHECK-LABEL: wasmssa.func exported @div_f64() -> f64 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 1.000000e+01 : f64
// CHECK: %[[VAL_1:.*]] = wasmssa.const 2.000000e+00 : f64
// CHECK: %[[VAL_2:.*]] = wasmssa.div %[[VAL_0]] %[[VAL_1]] : f64
diff --git a/mlir/test/Target/Wasm/double_nested_loop.mlir b/mlir/test/Target/Wasm/double_nested_loop.mlir
new file mode 100644
index 0000000..8b3e499
--- /dev/null
+++ b/mlir/test/Target/Wasm/double_nested_loop.mlir
@@ -0,0 +1,63 @@
+// RUN: yaml2obj %S/inputs/double_nested_loop.yaml.wasm -o - | mlir-translate --import-wasm | FileCheck %s
+
+/*
+(module
+ (func
+ ;; create a local variable and initialize it to 0
+ (local $i i32)
+ (local $j i32)
+
+ (loop $my_loop
+
+ ;; add one to $i
+ local.get $i
+ i32.const 1
+ i32.add
+ local.set $i
+ (loop $my_second_loop (result i32)
+ i32.const 1
+ local.get $j
+ i32.const 12
+ i32.add
+ local.tee $j
+ local.get $i
+ i32.gt_s
+ br_if $my_second_loop
+ )
+ i32.const 10
+ i32.lt_s
+ br_if $my_loop
+ )
+ )
+)
+*/
+
+// CHECK-LABEL: wasmssa.func @func_0() {
+// CHECK: %[[VAL_0:.*]] = wasmssa.local of type i32
+// CHECK: %[[VAL_1:.*]] = wasmssa.local of type i32
+// CHECK: wasmssa.loop : {
+// CHECK: %[[VAL_2:.*]] = wasmssa.local_get %[[VAL_0]] : ref to i32
+// CHECK: %[[VAL_3:.*]] = wasmssa.const 1 : i32
+// CHECK: %[[VAL_4:.*]] = wasmssa.add %[[VAL_2]] %[[VAL_3]] : i32
+// CHECK: wasmssa.local_set %[[VAL_0]] : ref to i32 to %[[VAL_4]] : i32
+// CHECK: wasmssa.loop : {
+// CHECK: %[[VAL_5:.*]] = wasmssa.const 1 : i32
+// CHECK: %[[VAL_6:.*]] = wasmssa.local_get %[[VAL_1]] : ref to i32
+// CHECK: %[[VAL_7:.*]] = wasmssa.const 12 : i32
+// CHECK: %[[VAL_8:.*]] = wasmssa.add %[[VAL_6]] %[[VAL_7]] : i32
+// CHECK: %[[VAL_9:.*]] = wasmssa.local_tee %[[VAL_1]] : ref to i32 to %[[VAL_8]] : i32
+// CHECK: %[[VAL_10:.*]] = wasmssa.local_get %[[VAL_0]] : ref to i32
+// CHECK: %[[VAL_11:.*]] = wasmssa.gt_si %[[VAL_9]] %[[VAL_10]] : i32 -> i32
+// CHECK: wasmssa.branch_if %[[VAL_11]] to level 0 else ^bb1
+// CHECK: ^bb1:
+// CHECK: wasmssa.block_return %[[VAL_5]] : i32
+// CHECK: }> ^bb1
+// CHECK: ^bb1(%[[VAL_12:.*]]: i32):
+// CHECK: %[[VAL_13:.*]] = wasmssa.const 10 : i32
+// CHECK: %[[VAL_14:.*]] = wasmssa.lt_si %[[VAL_12]] %[[VAL_13]] : i32 -> i32
+// CHECK: wasmssa.branch_if %[[VAL_14]] to level 0 else ^bb2
+// CHECK: ^bb2:
+// CHECK: wasmssa.block_return
+// CHECK: }> ^bb1
+// CHECK: ^bb1:
+// CHECK: wasmssa.return
diff --git a/mlir/test/Target/Wasm/empty_blocks_list_and_stack.mlir b/mlir/test/Target/Wasm/empty_blocks_list_and_stack.mlir
new file mode 100644
index 0000000..5c98f1a
--- /dev/null
+++ b/mlir/test/Target/Wasm/empty_blocks_list_and_stack.mlir
@@ -0,0 +1,53 @@
+// RUN: yaml2obj %S/inputs/empty_blocks_list_and_stack.yaml.wasm -o - | mlir-translate --import-wasm | FileCheck %s
+
+/* Source code used to create this test:
+(module
+ (func (param $num i32)
+ (block $b1
+ (block $b2
+ (block $b3
+ )
+ )
+ )
+ )
+
+ (func (param $num i32)
+ (block $b1)
+ (block $b2)
+ (block $b3)
+ )
+)
+
+*/
+
+// CHECK-LABEL: wasmssa.func @func_0(
+// CHECK-SAME: %[[ARG0:.*]]: !wasmssa<local ref to i32>) {
+// CHECK: wasmssa.block : {
+// CHECK: wasmssa.block : {
+// CHECK: wasmssa.block : {
+// CHECK: wasmssa.block_return
+// CHECK: }> ^bb1
+// CHECK: ^bb1:
+// CHECK: wasmssa.block_return
+// CHECK: }> ^bb1
+// CHECK: ^bb1:
+// CHECK: wasmssa.block_return
+// CHECK: }> ^bb1
+// CHECK: ^bb1:
+// CHECK: wasmssa.return
+
+// CHECK-LABEL: wasmssa.func @func_1(
+// CHECK-SAME: %[[ARG0:.*]]: !wasmssa<local ref to i32>) {
+// CHECK: wasmssa.block : {
+// CHECK: wasmssa.block_return
+// CHECK: }> ^bb1
+// CHECK: ^bb1:
+// CHECK: wasmssa.block : {
+// CHECK: wasmssa.block_return
+// CHECK: }> ^bb2
+// CHECK: ^bb2:
+// CHECK: wasmssa.block : {
+// CHECK: wasmssa.block_return
+// CHECK: }> ^bb3
+// CHECK: ^bb3:
+// CHECK: wasmssa.return
diff --git a/mlir/test/Target/Wasm/eq.mlir b/mlir/test/Target/Wasm/eq.mlir
new file mode 100644
index 0000000..ba3ae2f
--- /dev/null
+++ b/mlir/test/Target/Wasm/eq.mlir
@@ -0,0 +1,56 @@
+// RUN: yaml2obj %S/inputs/eq.yaml.wasm -o - | mlir-translate --import-wasm | FileCheck %s
+/* Source code used to create this test:
+(module
+ (func $eq_i32 (result i32)
+ i32.const 12
+ i32.const 50
+ i32.eq
+ )
+
+ (func $eq_i64 (result i32)
+ i64.const 20
+ i64.const 5
+ i64.eq
+ )
+
+ (func $eq_f32 (result i32)
+ f32.const 5
+ f32.const 14
+ f32.eq
+ )
+
+ (func $eq_f64 (result i32)
+ f64.const 17
+ f64.const 0
+ f64.eq
+ )
+)
+*/
+
+// CHECK-LABEL: wasmssa.func @func_0() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 12 : i32
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 50 : i32
+// CHECK: %[[VAL_2:.*]] = wasmssa.eq %[[VAL_0]] %[[VAL_1]] : i32 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+// CHECK: }
+
+// CHECK-LABEL: wasmssa.func @func_1() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 20 : i64
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 5 : i64
+// CHECK: %[[VAL_2:.*]] = wasmssa.eq %[[VAL_0]] %[[VAL_1]] : i64 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+// CHECK: }
+
+// CHECK-LABEL: wasmssa.func @func_2() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 5.000000e+00 : f32
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 1.400000e+01 : f32
+// CHECK: %[[VAL_2:.*]] = wasmssa.eq %[[VAL_0]] %[[VAL_1]] : f32 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+// CHECK: }
+
+// CHECK-LABEL: wasmssa.func @func_3() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 1.700000e+01 : f64
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 0.000000e+00 : f64
+// CHECK: %[[VAL_2:.*]] = wasmssa.eq %[[VAL_0]] %[[VAL_1]] : f64 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+// CHECK: }
diff --git a/mlir/test/Target/Wasm/eqz.mlir b/mlir/test/Target/Wasm/eqz.mlir
new file mode 100644
index 0000000..55cf94a
--- /dev/null
+++ b/mlir/test/Target/Wasm/eqz.mlir
@@ -0,0 +1,21 @@
+// RUN: yaml2obj %S/inputs/eqz.yaml.wasm -o - | mlir-translate --import-wasm | FileCheck %s
+/* Source code used to create this test:
+(module
+ (func (export "eqz_i32") (result i32)
+ i32.const 13
+ i32.eqz)
+
+ (func (export "eqz_i64") (result i32)
+ i64.const 13
+ i64.eqz)
+)
+*/
+// CHECK-LABEL: wasmssa.func exported @eqz_i32() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 13 : i32
+// CHECK: %[[VAL_1:.*]] = wasmssa.eqz %[[VAL_0]] : i32 -> i32
+// CHECK: wasmssa.return %[[VAL_1]] : i32
+
+// CHECK-LABEL: wasmssa.func exported @eqz_i64() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 13 : i64
+// CHECK: %[[VAL_1:.*]] = wasmssa.eqz %[[VAL_0]] : i64 -> i32
+// CHECK: wasmssa.return %[[VAL_1]] : i32
diff --git a/mlir/test/Target/Wasm/extend.mlir b/mlir/test/Target/Wasm/extend.mlir
new file mode 100644
index 0000000..5d4446a
--- /dev/null
+++ b/mlir/test/Target/Wasm/extend.mlir
@@ -0,0 +1,69 @@
+// RUN: yaml2obj %S/inputs/extend.yaml.wasm -o - | mlir-translate --import-wasm | FileCheck %s
+
+/* Source code used to create this test:
+(module
+ (func $i32_s (result i64)
+ i32.const 10
+ i64.extend_i32_s
+ )
+ (func $i32_u (result i64)
+ i32.const 10
+ i64.extend_i32_u
+ )
+ (func $extend8_32 (result i32)
+ i32.const 10
+ i32.extend8_s
+ )
+ (func $extend16_32 (result i32)
+ i32.const 10
+ i32.extend16_s
+ )
+ (func $extend8_64 (result i64)
+ i64.const 10
+ i64.extend8_s
+ )
+ (func $extend16_64 (result i64)
+ i64.const 10
+ i64.extend16_s
+ )
+ (func $extend32_64 (result i64)
+ i64.const 10
+ i64.extend32_s
+ )
+)
+*/
+
+// CHECK-LABEL: wasmssa.func @func_0() -> i64 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i32
+// CHECK: %[[VAL_1:.*]] = wasmssa.extend_i32_s %[[VAL_0]] to i64
+// CHECK: wasmssa.return %[[VAL_1]] : i64
+
+// CHECK-LABEL: wasmssa.func @func_1() -> i64 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i32
+// CHECK: %[[VAL_1:.*]] = wasmssa.extend_i32_u %[[VAL_0]] to i64
+// CHECK: wasmssa.return %[[VAL_1]] : i64
+
+// CHECK-LABEL: wasmssa.func @func_2() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i32
+// CHECK: %[[VAL_1:.*]] = wasmssa.extend 8 : ui32 low bits from %[[VAL_0]] : i32
+// CHECK: wasmssa.return %[[VAL_1]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_3() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i32
+// CHECK: %[[VAL_1:.*]] = wasmssa.extend 16 : ui32 low bits from %[[VAL_0]] : i32
+// CHECK: wasmssa.return %[[VAL_1]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_4() -> i64 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i64
+// CHECK: %[[VAL_1:.*]] = wasmssa.extend 8 : ui32 low bits from %[[VAL_0]] : i64
+// CHECK: wasmssa.return %[[VAL_1]] : i64
+
+// CHECK-LABEL: wasmssa.func @func_5() -> i64 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i64
+// CHECK: %[[VAL_1:.*]] = wasmssa.extend 16 : ui32 low bits from %[[VAL_0]] : i64
+// CHECK: wasmssa.return %[[VAL_1]] : i64
+
+// CHECK-LABEL: wasmssa.func @func_6() -> i64 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i64
+// CHECK: %[[VAL_1:.*]] = wasmssa.extend 32 : ui32 low bits from %[[VAL_0]] : i64
+// CHECK: wasmssa.return %[[VAL_1]] : i64
diff --git a/mlir/test/Target/Wasm/global.mlir b/mlir/test/Target/Wasm/global.mlir
index e72fe69..1e4fe44 100644
--- a/mlir/test/Target/Wasm/global.mlir
+++ b/mlir/test/Target/Wasm/global.mlir
@@ -29,9 +29,9 @@ i32.add
)
*/
-// CHECK-LABEL: wasmssa.import_global "from_js" from "env" as @global_0 nested : i32
+// CHECK-LABEL: wasmssa.import_global "from_js" from "env" as @global_0 : i32
-// CHECK-LABEL: wasmssa.func nested @func_0() -> i32 {
+// CHECK-LABEL: wasmssa.func @func_0() -> i32 {
// CHECK: %[[VAL_0:.*]] = wasmssa.global_get @global_0 : i32
// CHECK: %[[VAL_1:.*]] = wasmssa.global_get @global_1 : i32
// CHECK: %[[VAL_2:.*]] = wasmssa.add %[[VAL_0]] %[[VAL_1]] : i32
@@ -41,26 +41,26 @@ i32.add
// CHECK: %[[VAL_6:.*]] = wasmssa.add %[[VAL_2]] %[[VAL_5]] : i32
// CHECK: wasmssa.return %[[VAL_6]] : i32
-// CHECK-LABEL: wasmssa.global @global_1 i32 nested : {
+// CHECK-LABEL: wasmssa.global @global_1 i32 : {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i32
// CHECK: wasmssa.return %[[VAL_0]] : i32
-// CHECK-LABEL: wasmssa.global @global_2 i32 mutable nested : {
+// CHECK-LABEL: wasmssa.global @global_2 i32 mutable : {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i32
// CHECK: wasmssa.return %[[VAL_0]] : i32
-// CHECK-LABEL: wasmssa.global @global_3 i32 mutable nested : {
+// CHECK-LABEL: wasmssa.global @global_3 i32 mutable : {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i32
// CHECK: wasmssa.return %[[VAL_0]] : i32
-// CHECK-LABEL: wasmssa.global @global_4 i64 nested : {
+// CHECK-LABEL: wasmssa.global @global_4 i64 : {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 11 : i64
// CHECK: wasmssa.return %[[VAL_0]] : i64
-// CHECK-LABEL: wasmssa.global @global_5 f32 nested : {
+// CHECK-LABEL: wasmssa.global @global_5 f32 : {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 1.200000e+01 : f32
// CHECK: wasmssa.return %[[VAL_0]] : f32
-// CHECK-LABEL: wasmssa.global @global_6 f64 nested : {
+// CHECK-LABEL: wasmssa.global @global_6 f64 : {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 1.300000e+01 : f64
// CHECK: wasmssa.return %[[VAL_0]] : f64
diff --git a/mlir/test/Target/Wasm/if.mlir b/mlir/test/Target/Wasm/if.mlir
new file mode 100644
index 0000000..2d7bfbe
--- /dev/null
+++ b/mlir/test/Target/Wasm/if.mlir
@@ -0,0 +1,112 @@
+// RUN: yaml2obj %S/inputs/if.yaml.wasm -o - | mlir-translate --import-wasm | FileCheck %s
+
+/* Source code used to create this test:
+(module
+(type $intMapper (func (param $input i32) (result i32)))
+(func $if_else (type $intMapper)
+ local.get 0
+ i32.const 1
+ i32.and
+ if $isOdd (result i32)
+ local.get 0
+ i32.const 3
+ i32.mul
+ i32.const 1
+ i32.add
+ else
+ local.get 0
+ i32.const 1
+ i32.shr_u
+ end
+)
+
+(func $if_only (type $intMapper)
+ local.get 0
+ local.get 0
+ i32.const 1
+ i32.and
+ if $isOdd (type $intMapper)
+ i32.const 1
+ i32.add
+ end
+)
+
+(func $if_if (type $intMapper)
+ local.get 0
+ i32.ctz
+ if $isEven (result i32)
+ i32.const 2
+ local.get 0
+ i32.const 1
+ i32.shr_u
+ i32.ctz
+ if $isMultipleOfFour (type $intMapper)
+ i32.const 2
+ i32.add
+ end
+ else
+ i32.const 1
+ end
+)
+)
+*/
+// CHECK-LABEL: wasmssa.func @func_0(
+// CHECK-SAME: %[[ARG0:.*]]: !wasmssa<local ref to i32>) -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.local_get %[[ARG0]] : ref to i32
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 1 : i32
+// CHECK: %[[VAL_2:.*]] = wasmssa.and %[[VAL_0]] %[[VAL_1]] : i32
+// CHECK: wasmssa.if %[[VAL_2]] : {
+// CHECK: %[[VAL_3:.*]] = wasmssa.local_get %[[ARG0]] : ref to i32
+// CHECK: %[[VAL_4:.*]] = wasmssa.const 3 : i32
+// CHECK: %[[VAL_5:.*]] = wasmssa.mul %[[VAL_3]] %[[VAL_4]] : i32
+// CHECK: %[[VAL_6:.*]] = wasmssa.const 1 : i32
+// CHECK: %[[VAL_7:.*]] = wasmssa.add %[[VAL_5]] %[[VAL_6]] : i32
+// CHECK: wasmssa.block_return %[[VAL_7]] : i32
+// CHECK: } "else "{
+// CHECK: %[[VAL_8:.*]] = wasmssa.local_get %[[ARG0]] : ref to i32
+// CHECK: %[[VAL_9:.*]] = wasmssa.const 1 : i32
+// CHECK: %[[VAL_10:.*]] = wasmssa.shr_u %[[VAL_8]] by %[[VAL_9]] bits : i32
+// CHECK: wasmssa.block_return %[[VAL_10]] : i32
+// CHECK: }> ^bb1
+// CHECK: ^bb1(%[[VAL_11:.*]]: i32):
+// CHECK: wasmssa.return %[[VAL_11]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_1(
+// CHECK-SAME: %[[ARG0:.*]]: !wasmssa<local ref to i32>) -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.local_get %[[ARG0]] : ref to i32
+// CHECK: %[[VAL_1:.*]] = wasmssa.local_get %[[ARG0]] : ref to i32
+// CHECK: %[[VAL_2:.*]] = wasmssa.const 1 : i32
+// CHECK: %[[VAL_3:.*]] = wasmssa.and %[[VAL_1]] %[[VAL_2]] : i32
+// CHECK: wasmssa.if %[[VAL_3]](%[[VAL_0]]) : i32 : {
+// CHECK: ^bb0(%[[VAL_4:.*]]: i32):
+// CHECK: %[[VAL_5:.*]] = wasmssa.const 1 : i32
+// CHECK: %[[VAL_6:.*]] = wasmssa.add %[[VAL_4]] %[[VAL_5]] : i32
+// CHECK: wasmssa.block_return %[[VAL_6]] : i32
+// CHECK: } > ^bb1
+// CHECK: ^bb1(%[[VAL_7:.*]]: i32):
+// CHECK: wasmssa.return %[[VAL_7]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_2(
+// CHECK-SAME: %[[ARG0:.*]]: !wasmssa<local ref to i32>) -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.local_get %[[ARG0]] : ref to i32
+// CHECK: %[[VAL_1:.*]] = wasmssa.ctz %[[VAL_0]] : i32
+// CHECK: wasmssa.if %[[VAL_1]] : {
+// CHECK: %[[VAL_2:.*]] = wasmssa.const 2 : i32
+// CHECK: %[[VAL_3:.*]] = wasmssa.local_get %[[ARG0]] : ref to i32
+// CHECK: %[[VAL_4:.*]] = wasmssa.const 1 : i32
+// CHECK: %[[VAL_5:.*]] = wasmssa.shr_u %[[VAL_3]] by %[[VAL_4]] bits : i32
+// CHECK: %[[VAL_6:.*]] = wasmssa.ctz %[[VAL_5]] : i32
+// CHECK: wasmssa.if %[[VAL_6]](%[[VAL_2]]) : i32 : {
+// CHECK: ^bb0(%[[VAL_7:.*]]: i32):
+// CHECK: %[[VAL_8:.*]] = wasmssa.const 2 : i32
+// CHECK: %[[VAL_9:.*]] = wasmssa.add %[[VAL_7]] %[[VAL_8]] : i32
+// CHECK: wasmssa.block_return %[[VAL_9]] : i32
+// CHECK: } > ^bb1
+// CHECK: ^bb1(%[[VAL_10:.*]]: i32):
+// CHECK: wasmssa.block_return %[[VAL_10]] : i32
+// CHECK: } "else "{
+// CHECK: %[[VAL_11:.*]] = wasmssa.const 1 : i32
+// CHECK: wasmssa.block_return %[[VAL_11]] : i32
+// CHECK: }> ^bb1
+// CHECK: ^bb1(%[[VAL_12:.*]]: i32):
+// CHECK: wasmssa.return %[[VAL_12]] : i32
diff --git a/mlir/test/Target/Wasm/import.mlir b/mlir/test/Target/Wasm/import.mlir
index 541dcf3..dcdfa52 100644
--- a/mlir/test/Target/Wasm/import.mlir
+++ b/mlir/test/Target/Wasm/import.mlir
@@ -11,9 +11,9 @@
)
*/
-// CHECK-LABEL: wasmssa.import_func "foo" from "my_module" as @func_0 {sym_visibility = "nested", type = (i32) -> ()}
-// CHECK: wasmssa.import_func "bar" from "my_module" as @func_1 {sym_visibility = "nested", type = (i32) -> ()}
-// CHECK: wasmssa.import_table "table" from "my_module" as @table_0 {sym_visibility = "nested", type = !wasmssa<tabletype !wasmssa.funcref [2:]>}
-// CHECK: wasmssa.import_mem "mem" from "my_module" as @mem_0 {limits = !wasmssa<limit[2:]>, sym_visibility = "nested"}
-// CHECK: wasmssa.import_global "glob" from "my_module" as @global_0 nested : i32
-// CHECK: wasmssa.import_global "glob_mut" from "my_other_module" as @global_1 mutable nested : i32
+// CHECK-LABEL: wasmssa.import_func "foo" from "my_module" as @func_0 {type = (i32) -> ()}
+// CHECK: wasmssa.import_func "bar" from "my_module" as @func_1 {type = (i32) -> ()}
+// CHECK: wasmssa.import_table "table" from "my_module" as @table_0 {type = !wasmssa<tabletype !wasmssa.funcref [2:]>}
+// CHECK: wasmssa.import_mem "mem" from "my_module" as @mem_0 {limits = !wasmssa<limit[2:]>}
+// CHECK: wasmssa.import_global "glob" from "my_module" as @global_0 : i32
+// CHECK: wasmssa.import_global "glob_mut" from "my_other_module" as @global_1 mutable : i32
diff --git a/mlir/test/Target/Wasm/inputs/add_div.yaml.wasm b/mlir/test/Target/Wasm/inputs/add_div.yaml.wasm
new file mode 100644
index 0000000..865c315
--- /dev/null
+++ b/mlir/test/Target/Wasm/inputs/add_div.yaml.wasm
@@ -0,0 +1,50 @@
+--- !WASM
+FileHeader:
+ Version: 0x1
+Sections:
+ - Type: TYPE
+ Signatures:
+ - Index: 0
+ ParamTypes:
+ - I32
+ ReturnTypes:
+ - I32
+ - Index: 1
+ ParamTypes:
+ - I32
+ - I32
+ ReturnTypes:
+ - I32
+ - Type: IMPORT
+ Imports:
+ - Module: env
+ Field: twoTimes
+ Kind: FUNCTION
+ SigIndex: 0
+ - Type: FUNCTION
+ FunctionTypes: [ 1 ]
+ - Type: MEMORY
+ Memories:
+ - Minimum: 0x2
+ - Type: GLOBAL
+ Globals:
+ - Index: 0
+ Type: I32
+ Mutable: true
+ InitExpr:
+ Opcode: I32_CONST
+ Value: 66560
+ - Type: EXPORT
+ Exports:
+ - Name: memory
+ Kind: MEMORY
+ Index: 0
+ - Name: add
+ Kind: FUNCTION
+ Index: 1
+ - Type: CODE
+ Functions:
+ - Index: 1
+ Locals: []
+ Body: 20001000200110006A41026D0B
+...
diff --git a/mlir/test/Target/Wasm/inputs/block.yaml.wasm b/mlir/test/Target/Wasm/inputs/block.yaml.wasm
new file mode 100644
index 0000000..dd5118a
--- /dev/null
+++ b/mlir/test/Target/Wasm/inputs/block.yaml.wasm
@@ -0,0 +1,22 @@
+--- !WASM
+FileHeader:
+ Version: 0x1
+Sections:
+ - Type: TYPE
+ Signatures:
+ - Index: 0
+ ParamTypes: []
+ ReturnTypes: []
+ - Type: FUNCTION
+ FunctionTypes: [ 0 ]
+ - Type: EXPORT
+ Exports:
+ - Name: i_am_a_block
+ Kind: FUNCTION
+ Index: 0
+ - Type: CODE
+ Functions:
+ - Index: 0
+ Locals: []
+ Body: 02400B0B
+...
diff --git a/mlir/test/Target/Wasm/inputs/block_complete_type.yaml.wasm b/mlir/test/Target/Wasm/inputs/block_complete_type.yaml.wasm
new file mode 100644
index 0000000..7a125bf
--- /dev/null
+++ b/mlir/test/Target/Wasm/inputs/block_complete_type.yaml.wasm
@@ -0,0 +1,23 @@
+--- !WASM
+FileHeader:
+ Version: 0x1
+Sections:
+ - Type: TYPE
+ Signatures:
+ - Index: 0
+ ParamTypes:
+ - I32
+ ReturnTypes:
+ - I32
+ - Index: 1
+ ParamTypes: []
+ ReturnTypes:
+ - I32
+ - Type: FUNCTION
+ FunctionTypes: [ 1 ]
+ - Type: CODE
+ Functions:
+ - Index: 0
+ Locals: []
+ Body: 410E020041016A0B0B
+...
diff --git a/mlir/test/Target/Wasm/inputs/block_value_type.yaml.wasm b/mlir/test/Target/Wasm/inputs/block_value_type.yaml.wasm
new file mode 100644
index 0000000..4ba291d
--- /dev/null
+++ b/mlir/test/Target/Wasm/inputs/block_value_type.yaml.wasm
@@ -0,0 +1,18 @@
+--- !WASM
+FileHeader:
+ Version: 0x1
+Sections:
+ - Type: TYPE
+ Signatures:
+ - Index: 0
+ ParamTypes: []
+ ReturnTypes:
+ - I32
+ - Type: FUNCTION
+ FunctionTypes: [ 0 ]
+ - Type: CODE
+ Functions:
+ - Index: 0
+ Locals: []
+ Body: 027F41110B0B
+...
diff --git a/mlir/test/Target/Wasm/inputs/branch_if.yaml.wasm b/mlir/test/Target/Wasm/inputs/branch_if.yaml.wasm
new file mode 100644
index 0000000..40536ed
--- /dev/null
+++ b/mlir/test/Target/Wasm/inputs/branch_if.yaml.wasm
@@ -0,0 +1,18 @@
+--- !WASM
+FileHeader:
+ Version: 0x1
+Sections:
+ - Type: TYPE
+ Signatures:
+ - Index: 0
+ ParamTypes: []
+ ReturnTypes:
+ - I32
+ - Type: FUNCTION
+ FunctionTypes: [ 0 ]
+ - Type: CODE
+ Functions:
+ - Index: 0
+ Locals: []
+ Body: 027F410141020D0041016A0B0B
+...
diff --git a/mlir/test/Target/Wasm/inputs/call.yaml.wasm b/mlir/test/Target/Wasm/inputs/call.yaml.wasm
new file mode 100644
index 0000000..535a623
--- /dev/null
+++ b/mlir/test/Target/Wasm/inputs/call.yaml.wasm
@@ -0,0 +1,26 @@
+--- !WASM
+FileHeader:
+ Version: 0x1
+Sections:
+ - Type: TYPE
+ Signatures:
+ - Index: 0
+ ParamTypes: []
+ ReturnTypes:
+ - I32
+ - Type: FUNCTION
+ FunctionTypes: [ 0, 0 ]
+ - Type: EXPORT
+ Exports:
+ - Name: forty_two
+ Kind: FUNCTION
+ Index: 1
+ - Type: CODE
+ Functions:
+ - Index: 0
+ Locals: []
+ Body: 412A0B
+ - Index: 1
+ Locals: []
+ Body: 10000B
+...
diff --git a/mlir/test/Target/Wasm/inputs/comparison_ops.yaml.wasm b/mlir/test/Target/Wasm/inputs/comparison_ops.yaml.wasm
new file mode 100644
index 0000000..cde9ee1
--- /dev/null
+++ b/mlir/test/Target/Wasm/inputs/comparison_ops.yaml.wasm
@@ -0,0 +1,88 @@
+--- !WASM
+FileHeader:
+ Version: 0x1
+Sections:
+ - Type: TYPE
+ Signatures:
+ - Index: 0
+ ParamTypes: []
+ ReturnTypes:
+ - I32
+ - Type: FUNCTION
+ FunctionTypes: [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 ]
+ - Type: CODE
+ Functions:
+ - Index: 0
+ Locals: []
+ Body: 410C4132480B
+ - Index: 1
+ Locals: []
+ Body: 410C41324C0B
+ - Index: 2
+ Locals: []
+ Body: 410C4132490B
+ - Index: 3
+ Locals: []
+ Body: 410C41324D0B
+ - Index: 4
+ Locals: []
+ Body: 410C41324A0B
+ - Index: 5
+ Locals: []
+ Body: 410C41324B0B
+ - Index: 6
+ Locals: []
+ Body: 410C41324E0B
+ - Index: 7
+ Locals: []
+ Body: 410C41324F0B
+ - Index: 8
+ Locals: []
+ Body: 420C4232530B
+ - Index: 9
+ Locals: []
+ Body: 420C4232570B
+ - Index: 10
+ Locals: []
+ Body: 420C4232540B
+ - Index: 11
+ Locals: []
+ Body: 420C4232580B
+ - Index: 12
+ Locals: []
+ Body: 420C4232550B
+ - Index: 13
+ Locals: []
+ Body: 420C4232560B
+ - Index: 14
+ Locals: []
+ Body: 420C4232590B
+ - Index: 15
+ Locals: []
+ Body: 420C42325A0B
+ - Index: 16
+ Locals: []
+ Body: 430000A04043000060415D0B
+ - Index: 17
+ Locals: []
+ Body: 430000A04043000060415F0B
+ - Index: 18
+ Locals: []
+ Body: 430000A04043000060415E0B
+ - Index: 19
+ Locals: []
+ Body: 430000A0404300006041600B
+ - Index: 20
+ Locals: []
+ Body: 440000000000001440440000000000002C40630B
+ - Index: 21
+ Locals: []
+ Body: 440000000000001440440000000000002C40650B
+ - Index: 22
+ Locals: []
+ Body: 440000000000001440440000000000002C40640B
+ - Index: 23
+ Locals: []
+ Body: 440000000000001440440000000000002C40660B
+...
diff --git a/mlir/test/Target/Wasm/inputs/convert.yaml.wasm b/mlir/test/Target/Wasm/inputs/convert.yaml.wasm
new file mode 100644
index 0000000..c346a75
--- /dev/null
+++ b/mlir/test/Target/Wasm/inputs/convert.yaml.wasm
@@ -0,0 +1,69 @@
+--- !WASM
+FileHeader:
+ Version: 0x1
+Sections:
+ - Type: TYPE
+ Signatures:
+ - Index: 0
+ ParamTypes: []
+ ReturnTypes:
+ - F32
+ - Index: 1
+ ParamTypes: []
+ ReturnTypes:
+ - F64
+ - Type: FUNCTION
+ FunctionTypes: [ 0, 0, 0, 0, 1, 1, 1, 1 ]
+ - Type: EXPORT
+ Exports:
+ - Name: convert_i32_u_to_f32
+ Kind: FUNCTION
+ Index: 0
+ - Name: convert_i32_s_to_f32
+ Kind: FUNCTION
+ Index: 1
+ - Name: convert_i64_u_to_f32
+ Kind: FUNCTION
+ Index: 2
+ - Name: convert_i64s_to_f32
+ Kind: FUNCTION
+ Index: 3
+ - Name: convert_i32_u_to_f64
+ Kind: FUNCTION
+ Index: 4
+ - Name: convert_i32_s_to_f64
+ Kind: FUNCTION
+ Index: 5
+ - Name: convert_i64_u_to_f64
+ Kind: FUNCTION
+ Index: 6
+ - Name: convert_i64s_to_f64
+ Kind: FUNCTION
+ Index: 7
+ - Type: CODE
+ Functions:
+ - Index: 0
+ Locals: []
+ Body: 410AB30B
+ - Index: 1
+ Locals: []
+ Body: 412AB20B
+ - Index: 2
+ Locals: []
+ Body: 4211B50B
+ - Index: 3
+ Locals: []
+ Body: 420AB40B
+ - Index: 4
+ Locals: []
+ Body: 410AB80B
+ - Index: 5
+ Locals: []
+ Body: 412AB70B
+ - Index: 6
+ Locals: []
+ Body: 4211BA0B
+ - Index: 7
+ Locals: []
+ Body: 420AB90B
+...
diff --git a/mlir/test/Target/Wasm/inputs/demote.yaml.wasm b/mlir/test/Target/Wasm/inputs/demote.yaml.wasm
new file mode 100644
index 0000000..3997045
--- /dev/null
+++ b/mlir/test/Target/Wasm/inputs/demote.yaml.wasm
@@ -0,0 +1,18 @@
+--- !WASM
+FileHeader:
+ Version: 0x1
+Sections:
+ - Type: TYPE
+ Signatures:
+ - Index: 0
+ ParamTypes: []
+ ReturnTypes:
+ - F32
+ - Type: FUNCTION
+ FunctionTypes: [ 0 ]
+ - Type: CODE
+ Functions:
+ - Index: 0
+ Locals: []
+ Body: 44EC51B81E85EB0140B60B
+...
diff --git a/mlir/test/Target/Wasm/inputs/double_nested_loop.yaml.wasm b/mlir/test/Target/Wasm/inputs/double_nested_loop.yaml.wasm
new file mode 100644
index 0000000..41a2944
--- /dev/null
+++ b/mlir/test/Target/Wasm/inputs/double_nested_loop.yaml.wasm
@@ -0,0 +1,19 @@
+--- !WASM
+FileHeader:
+ Version: 0x1
+Sections:
+ - Type: TYPE
+ Signatures:
+ - Index: 0
+ ParamTypes: []
+ ReturnTypes: []
+ - Type: FUNCTION
+ FunctionTypes: [ 0 ]
+ - Type: CODE
+ Functions:
+ - Index: 0
+ Locals:
+ - Type: I32
+ Count: 2
+ Body: 0340200041016A2100037F41012001410C6A220120004A0D000B410A480D000B0B
+...
diff --git a/mlir/test/Target/Wasm/inputs/empty_blocks_list_and_stack.yaml.wasm b/mlir/test/Target/Wasm/inputs/empty_blocks_list_and_stack.yaml.wasm
new file mode 100644
index 0000000..3171409
--- /dev/null
+++ b/mlir/test/Target/Wasm/inputs/empty_blocks_list_and_stack.yaml.wasm
@@ -0,0 +1,21 @@
+--- !WASM
+FileHeader:
+ Version: 0x1
+Sections:
+ - Type: TYPE
+ Signatures:
+ - Index: 0
+ ParamTypes:
+ - I32
+ ReturnTypes: []
+ - Type: FUNCTION
+ FunctionTypes: [ 0, 0 ]
+ - Type: CODE
+ Functions:
+ - Index: 0
+ Locals: []
+ Body: 0240024002400B0B0B0B
+ - Index: 1
+ Locals: []
+ Body: 02400B02400B02400B0B
+...
diff --git a/mlir/test/Target/Wasm/inputs/eq.yaml.wasm b/mlir/test/Target/Wasm/inputs/eq.yaml.wasm
new file mode 100644
index 0000000..1998369
--- /dev/null
+++ b/mlir/test/Target/Wasm/inputs/eq.yaml.wasm
@@ -0,0 +1,27 @@
+--- !WASM
+FileHeader:
+ Version: 0x1
+Sections:
+ - Type: TYPE
+ Signatures:
+ - Index: 0
+ ParamTypes: []
+ ReturnTypes:
+ - I32
+ - Type: FUNCTION
+ FunctionTypes: [ 0, 0, 0, 0 ]
+ - Type: CODE
+ Functions:
+ - Index: 0
+ Locals: []
+ Body: 410C4132460B
+ - Index: 1
+ Locals: []
+ Body: 42144205510B
+ - Index: 2
+ Locals: []
+ Body: 430000A04043000060415B0B
+ - Index: 3
+ Locals: []
+ Body: 440000000000003140440000000000000000610B
+...
diff --git a/mlir/test/Target/Wasm/inputs/eqz.yaml.wasm b/mlir/test/Target/Wasm/inputs/eqz.yaml.wasm
new file mode 100644
index 0000000..894ac50
--- /dev/null
+++ b/mlir/test/Target/Wasm/inputs/eqz.yaml.wasm
@@ -0,0 +1,29 @@
+--- !WASM
+FileHeader:
+ Version: 0x1
+Sections:
+ - Type: TYPE
+ Signatures:
+ - Index: 0
+ ParamTypes: []
+ ReturnTypes:
+ - I32
+ - Type: FUNCTION
+ FunctionTypes: [ 0, 0 ]
+ - Type: EXPORT
+ Exports:
+ - Name: eqz_i32
+ Kind: FUNCTION
+ Index: 0
+ - Name: eqz_i64
+ Kind: FUNCTION
+ Index: 1
+ - Type: CODE
+ Functions:
+ - Index: 0
+ Locals: []
+ Body: 410D450B
+ - Index: 1
+ Locals: []
+ Body: 420D500B
+...
diff --git a/mlir/test/Target/Wasm/inputs/extend.yaml.wasm b/mlir/test/Target/Wasm/inputs/extend.yaml.wasm
new file mode 100644
index 0000000..7e872ba
--- /dev/null
+++ b/mlir/test/Target/Wasm/inputs/extend.yaml.wasm
@@ -0,0 +1,40 @@
+--- !WASM
+FileHeader:
+ Version: 0x1
+Sections:
+ - Type: TYPE
+ Signatures:
+ - Index: 0
+ ParamTypes: []
+ ReturnTypes:
+ - I64
+ - Index: 1
+ ParamTypes: []
+ ReturnTypes:
+ - I32
+ - Type: FUNCTION
+ FunctionTypes: [ 0, 0, 1, 1, 0, 0, 0 ]
+ - Type: CODE
+ Functions:
+ - Index: 0
+ Locals: []
+ Body: 410AAC0B
+ - Index: 1
+ Locals: []
+ Body: 410AAD0B
+ - Index: 2
+ Locals: []
+ Body: 410AC00B
+ - Index: 3
+ Locals: []
+ Body: 410AC10B
+ - Index: 4
+ Locals: []
+ Body: 420AC20B
+ - Index: 5
+ Locals: []
+ Body: 420AC30B
+ - Index: 6
+ Locals: []
+ Body: 420AC40B
+...
diff --git a/mlir/test/Target/Wasm/inputs/if.yaml.wasm b/mlir/test/Target/Wasm/inputs/if.yaml.wasm
new file mode 100644
index 0000000..ccc38f6
--- /dev/null
+++ b/mlir/test/Target/Wasm/inputs/if.yaml.wasm
@@ -0,0 +1,25 @@
+--- !WASM
+FileHeader:
+ Version: 0x1
+Sections:
+ - Type: TYPE
+ Signatures:
+ - Index: 0
+ ParamTypes:
+ - I32
+ ReturnTypes:
+ - I32
+ - Type: FUNCTION
+ FunctionTypes: [ 0, 0, 0 ]
+ - Type: CODE
+ Functions:
+ - Index: 0
+ Locals: []
+ Body: 2000410171047F200041036C41016A0520004101760B0B
+ - Index: 1
+ Locals: []
+ Body: 20002000410171040041016A0B0B
+ - Index: 2
+ Locals: []
+ Body: 200068047F4102200041017668040041026A0B0541010B0B
+...
diff --git a/mlir/test/Target/Wasm/inputs/loop.yaml.wasm b/mlir/test/Target/Wasm/inputs/loop.yaml.wasm
new file mode 100644
index 0000000..9d33894
--- /dev/null
+++ b/mlir/test/Target/Wasm/inputs/loop.yaml.wasm
@@ -0,0 +1,17 @@
+--- !WASM
+FileHeader:
+ Version: 0x1
+Sections:
+ - Type: TYPE
+ Signatures:
+ - Index: 0
+ ParamTypes: []
+ ReturnTypes: []
+ - Type: FUNCTION
+ FunctionTypes: [ 0 ]
+ - Type: CODE
+ Functions:
+ - Index: 0
+ Locals: []
+ Body: 03400B0B
+...
diff --git a/mlir/test/Target/Wasm/inputs/loop_with_inst.yaml.wasm b/mlir/test/Target/Wasm/inputs/loop_with_inst.yaml.wasm
new file mode 100644
index 0000000..4b8cc54
--- /dev/null
+++ b/mlir/test/Target/Wasm/inputs/loop_with_inst.yaml.wasm
@@ -0,0 +1,20 @@
+--- !WASM
+FileHeader:
+ Version: 0x1
+Sections:
+ - Type: TYPE
+ Signatures:
+ - Index: 0
+ ParamTypes: []
+ ReturnTypes:
+ - I32
+ - Type: FUNCTION
+ FunctionTypes: [ 0 ]
+ - Type: CODE
+ Functions:
+ - Index: 0
+ Locals:
+ - Type: I32
+ Count: 1
+ Body: 037F200041016A21002000410A480B0B
+...
diff --git a/mlir/test/Target/Wasm/inputs/ne.yaml.wasm b/mlir/test/Target/Wasm/inputs/ne.yaml.wasm
new file mode 100644
index 0000000..0167519
--- /dev/null
+++ b/mlir/test/Target/Wasm/inputs/ne.yaml.wasm
@@ -0,0 +1,27 @@
+--- !WASM
+FileHeader:
+ Version: 0x1
+Sections:
+ - Type: TYPE
+ Signatures:
+ - Index: 0
+ ParamTypes: []
+ ReturnTypes:
+ - I32
+ - Type: FUNCTION
+ FunctionTypes: [ 0, 0, 0, 0 ]
+ - Type: CODE
+ Functions:
+ - Index: 0
+ Locals: []
+ Body: 410C4132470B
+ - Index: 1
+ Locals: []
+ Body: 42144205520B
+ - Index: 2
+ Locals: []
+ Body: 430000A04043000060415C0B
+ - Index: 3
+ Locals: []
+ Body: 440000000000003140440000000000000000620B
+...
diff --git a/mlir/test/Target/Wasm/inputs/promote.yaml.wasm b/mlir/test/Target/Wasm/inputs/promote.yaml.wasm
new file mode 100644
index 0000000..d38603e
--- /dev/null
+++ b/mlir/test/Target/Wasm/inputs/promote.yaml.wasm
@@ -0,0 +1,18 @@
+--- !WASM
+FileHeader:
+ Version: 0x1
+Sections:
+ - Type: TYPE
+ Signatures:
+ - Index: 0
+ ParamTypes: []
+ ReturnTypes:
+ - F64
+ - Type: FUNCTION
+ FunctionTypes: [ 0 ]
+ - Type: CODE
+ Functions:
+ - Index: 0
+ Locals: []
+ Body: 4300002841BB0B
+...
diff --git a/mlir/test/Target/Wasm/inputs/reinterpret.yaml.wasm b/mlir/test/Target/Wasm/inputs/reinterpret.yaml.wasm
new file mode 100644
index 0000000..c01c1b1
--- /dev/null
+++ b/mlir/test/Target/Wasm/inputs/reinterpret.yaml.wasm
@@ -0,0 +1,53 @@
+--- !WASM
+FileHeader:
+ Version: 0x1
+Sections:
+ - Type: TYPE
+ Signatures:
+ - Index: 0
+ ParamTypes: []
+ ReturnTypes:
+ - I32
+ - Index: 1
+ ParamTypes: []
+ ReturnTypes:
+ - I64
+ - Index: 2
+ ParamTypes: []
+ ReturnTypes:
+ - F32
+ - Index: 3
+ ParamTypes: []
+ ReturnTypes:
+ - F64
+ - Type: FUNCTION
+ FunctionTypes: [ 0, 1, 2, 3 ]
+ - Type: EXPORT
+ Exports:
+ - Name: i32.reinterpret_f32
+ Kind: FUNCTION
+ Index: 0
+ - Name: i64.reinterpret_f64
+ Kind: FUNCTION
+ Index: 1
+ - Name: f32.reinterpret_i32
+ Kind: FUNCTION
+ Index: 2
+ - Name: f64.reinterpret_i64
+ Kind: FUNCTION
+ Index: 3
+ - Type: CODE
+ Functions:
+ - Index: 0
+ Locals: []
+ Body: 43000080BFBC0B
+ - Index: 1
+ Locals: []
+ Body: 44000000000000F0BFBD0B
+ - Index: 2
+ Locals: []
+ Body: 417FBE0B
+ - Index: 3
+ Locals: []
+ Body: 427FBF0B
+...
diff --git a/mlir/test/Target/Wasm/inputs/rounding.yaml.wasm b/mlir/test/Target/Wasm/inputs/rounding.yaml.wasm
new file mode 100644
index 0000000..c6e8bf6
--- /dev/null
+++ b/mlir/test/Target/Wasm/inputs/rounding.yaml.wasm
@@ -0,0 +1,37 @@
+--- !WASM
+FileHeader:
+ Version: 0x1
+Sections:
+ - Type: TYPE
+ Signatures:
+ - Index: 0
+ ParamTypes: []
+ ReturnTypes:
+ - F64
+ - Index: 1
+ ParamTypes: []
+ ReturnTypes:
+ - F32
+ - Type: FUNCTION
+ FunctionTypes: [ 0, 1, 0, 1, 0, 1 ]
+ - Type: CODE
+ Functions:
+ - Index: 0
+ Locals: []
+ Body: 4433333333333328C09B0B
+ - Index: 1
+ Locals: []
+ Body: 43A01ACF3F8D0B
+ - Index: 2
+ Locals: []
+ Body: 4433333333333328C09C0B
+ - Index: 3
+ Locals: []
+ Body: 43A01ACF3F8E0B
+ - Index: 4
+ Locals: []
+ Body: 4433333333333328C09D0B
+ - Index: 5
+ Locals: []
+ Body: 43A01ACF3F8F0B
+...
diff --git a/mlir/test/Target/Wasm/inputs/wrap.yaml.wasm b/mlir/test/Target/Wasm/inputs/wrap.yaml.wasm
new file mode 100644
index 0000000..51c0b02
--- /dev/null
+++ b/mlir/test/Target/Wasm/inputs/wrap.yaml.wasm
@@ -0,0 +1,24 @@
+--- !WASM
+FileHeader:
+ Version: 0x1
+Sections:
+ - Type: TYPE
+ Signatures:
+ - Index: 0
+ ParamTypes:
+ - I64
+ ReturnTypes:
+ - I32
+ - Type: FUNCTION
+ FunctionTypes: [ 0 ]
+ - Type: EXPORT
+ Exports:
+ - Name: i64_wrap
+ Kind: FUNCTION
+ Index: 0
+ - Type: CODE
+ Functions:
+ - Index: 0
+ Locals: []
+ Body: 2000A70B
+...
diff --git a/mlir/test/Target/Wasm/invalid_block_type_index.yaml b/mlir/test/Target/Wasm/invalid_block_type_index.yaml
new file mode 100644
index 0000000..5b83e2e
--- /dev/null
+++ b/mlir/test/Target/Wasm/invalid_block_type_index.yaml
@@ -0,0 +1,28 @@
+
+# RUN: yaml2obj %s | not mlir-translate --import-wasm -o - 2>&1 | FileCheck %s
+
+# CHECK: type index references nonexistent type (2)
+
+--- !WASM
+FileHeader:
+ Version: 0x1
+Sections:
+ - Type: TYPE
+ Signatures:
+ - Index: 0
+ ParamTypes:
+ - I32
+ ReturnTypes:
+ - I32
+ - Index: 1
+ ParamTypes: []
+ ReturnTypes:
+ - I32
+ - Type: FUNCTION
+ FunctionTypes: [ 1 ]
+ - Type: CODE
+ Functions:
+ - Index: 0
+ Locals: []
+ Body: 410E020241016A0B0B
+# -----------------------------^^ Invalid type ID
diff --git a/mlir/test/Target/Wasm/local.mlir b/mlir/test/Target/Wasm/local.mlir
index 32f5900..9844f9c 100644
--- a/mlir/test/Target/Wasm/local.mlir
+++ b/mlir/test/Target/Wasm/local.mlir
@@ -29,7 +29,7 @@
)
*/
-// CHECK-LABEL: wasmssa.func nested @func_0() -> f32 {
+// CHECK-LABEL: wasmssa.func @func_0() -> f32 {
// CHECK: %[[VAL_0:.*]] = wasmssa.local of type f32
// CHECK: %[[VAL_1:.*]] = wasmssa.local of type f32
// CHECK: %[[VAL_2:.*]] = wasmssa.const 8.000000e+00 : f32
@@ -40,7 +40,7 @@
// CHECK: %[[VAL_6:.*]] = wasmssa.add %[[VAL_3]] %[[VAL_5]] : f32
// CHECK: wasmssa.return %[[VAL_6]] : f32
-// CHECK-LABEL: wasmssa.func nested @func_1() -> i32 {
+// CHECK-LABEL: wasmssa.func @func_1() -> i32 {
// CHECK: %[[VAL_0:.*]] = wasmssa.local of type i32
// CHECK: %[[VAL_1:.*]] = wasmssa.local of type i32
// CHECK: %[[VAL_2:.*]] = wasmssa.const 8 : i32
@@ -51,7 +51,7 @@
// CHECK: %[[VAL_6:.*]] = wasmssa.add %[[VAL_3]] %[[VAL_5]] : i32
// CHECK: wasmssa.return %[[VAL_6]] : i32
-// CHECK-LABEL: wasmssa.func nested @func_2(
+// CHECK-LABEL: wasmssa.func @func_2(
// CHECK-SAME: %[[ARG0:.*]]: !wasmssa<local ref to i32>) -> i32 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 3 : i32
// CHECK: wasmssa.local_set %[[ARG0]] : ref to i32 to %[[VAL_0]] : i32
diff --git a/mlir/test/Target/Wasm/loop.mlir b/mlir/test/Target/Wasm/loop.mlir
new file mode 100644
index 0000000..29ad502
--- /dev/null
+++ b/mlir/test/Target/Wasm/loop.mlir
@@ -0,0 +1,17 @@
+// RUN: yaml2obj %S/inputs/loop.yaml.wasm -o - | mlir-translate --import-wasm | FileCheck %s
+
+/* IR generated from:
+(module
+ (func
+ (loop $my_loop
+ )
+ )
+)*/
+
+// CHECK-LABEL: wasmssa.func @func_0() {
+// CHECK: wasmssa.loop : {
+// CHECK: wasmssa.block_return
+// CHECK: }> ^bb1
+// CHECK: ^bb1:
+// CHECK: wasmssa.return
+// CHECK: }
diff --git a/mlir/test/Target/Wasm/loop_with_inst.mlir b/mlir/test/Target/Wasm/loop_with_inst.mlir
new file mode 100644
index 0000000..311d007
--- /dev/null
+++ b/mlir/test/Target/Wasm/loop_with_inst.mlir
@@ -0,0 +1,33 @@
+// RUN: yaml2obj %S/inputs/loop_with_inst.yaml.wasm -o - | mlir-translate --import-wasm | FileCheck %s
+
+/* Code used to create this test:
+
+(module
+ (func (result i32)
+ (local $i i32)
+ (loop $my_loop (result i32)
+ local.get $i
+ i32.const 1
+ i32.add
+ local.set $i
+ local.get $i
+ i32.const 10
+ i32.lt_s
+ )
+ )
+)*/
+
+// CHECK-LABEL: wasmssa.func @func_0() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.local of type i32
+// CHECK: wasmssa.loop : {
+// CHECK: %[[VAL_1:.*]] = wasmssa.local_get %[[VAL_0]] : ref to i32
+// CHECK: %[[VAL_2:.*]] = wasmssa.const 1 : i32
+// CHECK: %[[VAL_3:.*]] = wasmssa.add %[[VAL_1]] %[[VAL_2]] : i32
+// CHECK: wasmssa.local_set %[[VAL_0]] : ref to i32 to %[[VAL_3]] : i32
+// CHECK: %[[VAL_4:.*]] = wasmssa.local_get %[[VAL_0]] : ref to i32
+// CHECK: %[[VAL_5:.*]] = wasmssa.const 10 : i32
+// CHECK: %[[VAL_6:.*]] = wasmssa.lt_si %[[VAL_4]] %[[VAL_5]] : i32 -> i32
+// CHECK: wasmssa.block_return %[[VAL_6]] : i32
+// CHECK: }> ^bb1
+// CHECK: ^bb1(%[[VAL_7:.*]]: i32):
+// CHECK: wasmssa.return %[[VAL_7]] : i32
diff --git a/mlir/test/Target/Wasm/max.mlir b/mlir/test/Target/Wasm/max.mlir
index 4ef2042..9160bde 100644
--- a/mlir/test/Target/Wasm/max.mlir
+++ b/mlir/test/Target/Wasm/max.mlir
@@ -16,14 +16,14 @@
)
*/
-// CHECK-LABEL: wasmssa.func @min_f32() -> f32 {
+// CHECK-LABEL: wasmssa.func exported @min_f32() -> f32 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 1.000000e+01 : f32
// CHECK: %[[VAL_1:.*]] = wasmssa.const 1.000000e+00 : f32
// CHECK: %[[VAL_2:.*]] = wasmssa.max %[[VAL_0]] %[[VAL_1]] : f32
// CHECK: wasmssa.return %[[VAL_2]] : f32
-// CHECK-LABEL: wasmssa.func @min_f64() -> f64 {
+// CHECK-LABEL: wasmssa.func exported @min_f64() -> f64 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 1.000000e+01 : f64
// CHECK: %[[VAL_1:.*]] = wasmssa.const 1.000000e+00 : f64
// CHECK: %[[VAL_2:.*]] = wasmssa.max %[[VAL_0]] %[[VAL_1]] : f64
diff --git a/mlir/test/Target/Wasm/memory_min_eq_max.mlir b/mlir/test/Target/Wasm/memory_min_eq_max.mlir
index 2ba5ab5..ea8f719 100644
--- a/mlir/test/Target/Wasm/memory_min_eq_max.mlir
+++ b/mlir/test/Target/Wasm/memory_min_eq_max.mlir
@@ -4,4 +4,4 @@
(module (memory 0 0))
*/
-// CHECK-LABEL: wasmssa.memory @mem_0 nested !wasmssa<limit[0: 0]>
+// CHECK-LABEL: wasmssa.memory @mem_0 !wasmssa<limit[0: 0]>
diff --git a/mlir/test/Target/Wasm/memory_min_max.mlir b/mlir/test/Target/Wasm/memory_min_max.mlir
index ebf6418..88782ec 100644
--- a/mlir/test/Target/Wasm/memory_min_max.mlir
+++ b/mlir/test/Target/Wasm/memory_min_max.mlir
@@ -4,4 +4,4 @@
(module (memory 0 65536))
*/
-// CHECK-LABEL: wasmssa.memory @mem_0 nested !wasmssa<limit[0: 65536]>
+// CHECK-LABEL: wasmssa.memory @mem_0 !wasmssa<limit[0: 65536]>
diff --git a/mlir/test/Target/Wasm/memory_min_no_max.mlir b/mlir/test/Target/Wasm/memory_min_no_max.mlir
index 8d88786..c10c5cc 100644
--- a/mlir/test/Target/Wasm/memory_min_no_max.mlir
+++ b/mlir/test/Target/Wasm/memory_min_no_max.mlir
@@ -4,4 +4,4 @@
(module (memory 1))
*/
-// CHECK-LABEL: wasmssa.memory @mem_0 nested !wasmssa<limit[1:]>
+// CHECK-LABEL: wasmssa.memory @mem_0 !wasmssa<limit[1:]>
diff --git a/mlir/test/Target/Wasm/min.mlir b/mlir/test/Target/Wasm/min.mlir
index 1058c7d..2372bcc 100644
--- a/mlir/test/Target/Wasm/min.mlir
+++ b/mlir/test/Target/Wasm/min.mlir
@@ -16,13 +16,13 @@
)
*/
-// CHECK-LABEL: wasmssa.func @min_f32() -> f32 {
+// CHECK-LABEL: wasmssa.func exported @min_f32() -> f32 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 1.000000e+01 : f32
// CHECK: %[[VAL_1:.*]] = wasmssa.const 1.000000e+00 : f32
// CHECK: %[[VAL_2:.*]] = wasmssa.min %[[VAL_0]] %[[VAL_1]] : f32
// CHECK: wasmssa.return %[[VAL_2]] : f32
-// CHECK-LABEL: wasmssa.func @min_f64() -> f64 {
+// CHECK-LABEL: wasmssa.func exported @min_f64() -> f64 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 1.000000e+01 : f64
// CHECK: %[[VAL_1:.*]] = wasmssa.const 1.000000e+00 : f64
// CHECK: %[[VAL_2:.*]] = wasmssa.min %[[VAL_0]] %[[VAL_1]] : f64
diff --git a/mlir/test/Target/Wasm/ne.mlir b/mlir/test/Target/Wasm/ne.mlir
new file mode 100644
index 0000000..331df75
--- /dev/null
+++ b/mlir/test/Target/Wasm/ne.mlir
@@ -0,0 +1,52 @@
+// RUN: yaml2obj %S/inputs/ne.yaml.wasm -o - | mlir-translate --import-wasm | FileCheck %s
+/* Source code used to create this test:
+(module
+ (func $ne_i32 (result i32)
+ i32.const 12
+ i32.const 50
+ i32.ne
+ )
+
+ (func $ne_i64 (result i32)
+ i64.const 20
+ i64.const 5
+ i64.ne
+ )
+
+ (func $ne_f32 (result i32)
+ f32.const 5
+ f32.const 14
+ f32.ne
+ )
+
+ (func $ne_f64 (result i32)
+ f64.const 17
+ f64.const 0
+ f64.ne
+ )
+)
+*/
+
+// CHECK-LABEL: wasmssa.func @func_0() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 12 : i32
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 50 : i32
+// CHECK: %[[VAL_2:.*]] = wasmssa.ne %[[VAL_0]] %[[VAL_1]] : i32 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_1() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 20 : i64
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 5 : i64
+// CHECK: %[[VAL_2:.*]] = wasmssa.ne %[[VAL_0]] %[[VAL_1]] : i64 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_2() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 5.000000e+00 : f32
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 1.400000e+01 : f32
+// CHECK: %[[VAL_2:.*]] = wasmssa.ne %[[VAL_0]] %[[VAL_1]] : f32 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
+
+// CHECK-LABEL: wasmssa.func @func_3() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 1.700000e+01 : f64
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 0.000000e+00 : f64
+// CHECK: %[[VAL_2:.*]] = wasmssa.ne %[[VAL_0]] %[[VAL_1]] : f64 -> i32
+// CHECK: wasmssa.return %[[VAL_2]] : i32
diff --git a/mlir/test/Target/Wasm/neg.mlir b/mlir/test/Target/Wasm/neg.mlir
index 5811ab50..dae8ee5 100644
--- a/mlir/test/Target/Wasm/neg.mlir
+++ b/mlir/test/Target/Wasm/neg.mlir
@@ -12,12 +12,12 @@
)
*/
-// CHECK-LABEL: wasmssa.func @neg_f32() -> f32 {
+// CHECK-LABEL: wasmssa.func exported @neg_f32() -> f32 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 1.000000e+01 : f32
// CHECK: %[[VAL_1:.*]] = wasmssa.neg %[[VAL_0]] : f32
// CHECK: wasmssa.return %[[VAL_1]] : f32
-// CHECK-LABEL: wasmssa.func @neg_f64() -> f64 {
+// CHECK-LABEL: wasmssa.func exported @neg_f64() -> f64 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 1.000000e+01 : f64
// CHECK: %[[VAL_1:.*]] = wasmssa.neg %[[VAL_0]] : f64
// CHECK: wasmssa.return %[[VAL_1]] : f64
diff --git a/mlir/test/Target/Wasm/or.mlir b/mlir/test/Target/Wasm/or.mlir
index 521f2ba..be0b3d7 100644
--- a/mlir/test/Target/Wasm/or.mlir
+++ b/mlir/test/Target/Wasm/or.mlir
@@ -14,13 +14,13 @@
)
*/
-// CHECK-LABEL: wasmssa.func @or_i32() -> i32 {
+// CHECK-LABEL: wasmssa.func exported @or_i32() -> i32 {
// CHECK: %0 = wasmssa.const 10 : i32
// CHECK: %1 = wasmssa.const 3 : i32
// CHECK: %2 = wasmssa.or %0 %1 : i32
// CHECK: wasmssa.return %2 : i32
-// CHECK-LABEL: wasmssa.func @or_i64() -> i64 {
+// CHECK-LABEL: wasmssa.func exported @or_i64() -> i64 {
// CHECK: %0 = wasmssa.const 10 : i64
// CHECK: %1 = wasmssa.const 3 : i64
// CHECK: %2 = wasmssa.or %0 %1 : i64
diff --git a/mlir/test/Target/Wasm/popcnt.mlir b/mlir/test/Target/Wasm/popcnt.mlir
index 235333a..bfaa8eb 100644
--- a/mlir/test/Target/Wasm/popcnt.mlir
+++ b/mlir/test/Target/Wasm/popcnt.mlir
@@ -14,12 +14,12 @@
)
*/
-// CHECK-LABEL: wasmssa.func @popcnt_i32() -> i32 {
+// CHECK-LABEL: wasmssa.func exported @popcnt_i32() -> i32 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i32
// CHECK: %[[VAL_1:.*]] = wasmssa.popcnt %[[VAL_0]] : i32
// CHECK: wasmssa.return %[[VAL_1]] : i32
-// CHECK-LABEL: wasmssa.func @popcnt_i64() -> i64 {
+// CHECK-LABEL: wasmssa.func exported @popcnt_i64() -> i64 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 10 : i64
// CHECK: %[[VAL_1:.*]] = wasmssa.popcnt %[[VAL_0]] : i64
// CHECK: wasmssa.return %[[VAL_1]] : i64
diff --git a/mlir/test/Target/Wasm/promote.mlir b/mlir/test/Target/Wasm/promote.mlir
new file mode 100644
index 0000000..44c31b6
--- /dev/null
+++ b/mlir/test/Target/Wasm/promote.mlir
@@ -0,0 +1,14 @@
+// RUN: yaml2obj %S/inputs/promote.yaml.wasm -o - | mlir-translate --import-wasm | FileCheck %s
+
+/* Source code used to generate this test:
+(module
+ (func $main (result f64)
+ f32.const 10.5
+ f64.promote_f32
+ )
+)*/
+
+// CHECK-LABEL: wasmssa.func @func_0() -> f64 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 1.050000e+01 : f32
+// CHECK: %[[VAL_1:.*]] = wasmssa.promote %[[VAL_0]] : f32 to f64
+// CHECK: wasmssa.return %[[VAL_1]] : f64
diff --git a/mlir/test/Target/Wasm/reinterpret.mlir b/mlir/test/Target/Wasm/reinterpret.mlir
new file mode 100644
index 0000000..574d13f
--- /dev/null
+++ b/mlir/test/Target/Wasm/reinterpret.mlir
@@ -0,0 +1,46 @@
+// RUN: yaml2obj %S/inputs/reinterpret.yaml.wasm -o - | mlir-translate --import-wasm | FileCheck %s
+
+/*
+Test generated from:
+(module
+ (func (export "i32.reinterpret_f32") (result i32)
+ f32.const -1
+ i32.reinterpret_f32
+ )
+
+ (func (export "i64.reinterpret_f64") (result i64)
+ f64.const -1
+ i64.reinterpret_f64
+ )
+
+ (func (export "f32.reinterpret_i32") (result f32)
+ i32.const -1
+ f32.reinterpret_i32
+ )
+
+ (func (export "f64.reinterpret_i64") (result f64)
+ i64.const -1
+ f64.reinterpret_i64
+ )
+)
+*/
+
+// CHECK-LABEL: wasmssa.func exported @i32.reinterpret_f32() -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const -1.000000e+00 : f32
+// CHECK: %[[VAL_1:.*]] = wasmssa.reinterpret %[[VAL_0]] : f32 as i32
+// CHECK: wasmssa.return %[[VAL_1]] : i32
+
+// CHECK-LABEL: wasmssa.func exported @i64.reinterpret_f64() -> i64 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const -1.000000e+00 : f64
+// CHECK: %[[VAL_1:.*]] = wasmssa.reinterpret %[[VAL_0]] : f64 as i64
+// CHECK: wasmssa.return %[[VAL_1]] : i64
+
+// CHECK-LABEL: wasmssa.func exported @f32.reinterpret_i32() -> f32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const -1 : i32
+// CHECK: %[[VAL_1:.*]] = wasmssa.reinterpret %[[VAL_0]] : i32 as f32
+// CHECK: wasmssa.return %[[VAL_1]] : f32
+
+// CHECK-LABEL: wasmssa.func exported @f64.reinterpret_i64() -> f64 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const -1 : i64
+// CHECK: %[[VAL_1:.*]] = wasmssa.reinterpret %[[VAL_0]] : i64 as f64
+// CHECK: wasmssa.return %[[VAL_1]] : f64
diff --git a/mlir/test/Target/Wasm/rem.mlir b/mlir/test/Target/Wasm/rem.mlir
index b19b8d9..16c9c78 100644
--- a/mlir/test/Target/Wasm/rem.mlir
+++ b/mlir/test/Target/Wasm/rem.mlir
@@ -24,28 +24,28 @@
)
*/
-// CHECK-LABEL: wasmssa.func @rem_u_i32() -> i32 {
+// CHECK-LABEL: wasmssa.func exported @rem_u_i32() -> i32 {
// CHECK: %0 = wasmssa.const 10 : i32
// CHECK: %1 = wasmssa.const 3 : i32
// CHECK: %2 = wasmssa.rem_ui %0 %1 : i32
// CHECK: wasmssa.return %2 : i32
// CHECK: }
-// CHECK-LABEL: wasmssa.func @rem_u_i64() -> i64 {
+// CHECK-LABEL: wasmssa.func exported @rem_u_i64() -> i64 {
// CHECK: %0 = wasmssa.const 10 : i64
// CHECK: %1 = wasmssa.const 3 : i64
// CHECK: %2 = wasmssa.rem_ui %0 %1 : i64
// CHECK: wasmssa.return %2 : i64
// CHECK: }
-// CHECK-LABEL: wasmssa.func @rem_s_i32() -> i32 {
+// CHECK-LABEL: wasmssa.func exported @rem_s_i32() -> i32 {
// CHECK: %0 = wasmssa.const 10 : i32
// CHECK: %1 = wasmssa.const 3 : i32
// CHECK: %2 = wasmssa.rem_si %0 %1 : i32
// CHECK: wasmssa.return %2 : i32
// CHECK: }
-// CHECK-LABEL: wasmssa.func @rem_s_i64() -> i64 {
+// CHECK-LABEL: wasmssa.func exported @rem_s_i64() -> i64 {
// CHECK: %0 = wasmssa.const 10 : i64
// CHECK: %1 = wasmssa.const 3 : i64
// CHECK: %2 = wasmssa.rem_si %0 %1 : i64
diff --git a/mlir/test/Target/Wasm/rotl.mlir b/mlir/test/Target/Wasm/rotl.mlir
index ec573554..4c2e5af 100644
--- a/mlir/test/Target/Wasm/rotl.mlir
+++ b/mlir/test/Target/Wasm/rotl.mlir
@@ -14,13 +14,13 @@
)
*/
-// CHECK-LABEL: wasmssa.func @rotl_i32() -> i32 {
+// CHECK-LABEL: wasmssa.func exported @rotl_i32() -> i32 {
// CHECK: %0 = wasmssa.const 10 : i32
// CHECK: %1 = wasmssa.const 3 : i32
// CHECK: %2 = wasmssa.rotl %0 by %1 bits : i32
// CHECK: wasmssa.return %2 : i32
-// CHECK-LABEL: wasmssa.func @rotl_i64() -> i64 {
+// CHECK-LABEL: wasmssa.func exported @rotl_i64() -> i64 {
// CHECK: %0 = wasmssa.const 10 : i64
// CHECK: %1 = wasmssa.const 3 : i64
// CHECK: %2 = wasmssa.rotl %0 by %1 bits : i64
diff --git a/mlir/test/Target/Wasm/rotr.mlir b/mlir/test/Target/Wasm/rotr.mlir
index 5618b43..ec403d0 100644
--- a/mlir/test/Target/Wasm/rotr.mlir
+++ b/mlir/test/Target/Wasm/rotr.mlir
@@ -14,13 +14,13 @@
)
*/
-// CHECK-LABEL: wasmssa.func @rotr_i32() -> i32 {
+// CHECK-LABEL: wasmssa.func exported @rotr_i32() -> i32 {
// CHECK: %0 = wasmssa.const 10 : i32
// CHECK: %1 = wasmssa.const 3 : i32
// CHECK: %2 = wasmssa.rotr %0 by %1 bits : i32
// CHECK: wasmssa.return %2 : i32
-// CHECK-LABEL: wasmssa.func @rotr_i64() -> i64 {
+// CHECK-LABEL: wasmssa.func exported @rotr_i64() -> i64 {
// CHECK: %0 = wasmssa.const 10 : i64
// CHECK: %1 = wasmssa.const 3 : i64
// CHECK: %2 = wasmssa.rotr %0 by %1 bits : i64
diff --git a/mlir/test/Target/Wasm/rounding.mlir b/mlir/test/Target/Wasm/rounding.mlir
new file mode 100644
index 0000000..947637e
--- /dev/null
+++ b/mlir/test/Target/Wasm/rounding.mlir
@@ -0,0 +1,50 @@
+// RUN: yaml2obj %S/inputs/rounding.yaml.wasm -o - | mlir-translate --import-wasm | FileCheck %s
+/* Source code used to create this test:
+(module
+ (func $ceil_f64 (result f64)
+ f64.const -12.1
+ f64.ceil
+ )
+ (func $ceil_f32 (result f32)
+ f32.const 1.618
+ f32.ceil
+ )
+ (func $floor_f64 (result f64)
+ f64.const -12.1
+ f64.floor
+ )
+ (func $floor_f32 (result f32)
+ f32.const 1.618
+ f32.floor
+ )
+*/
+
+// CHECK-LABEL: wasmssa.func @func_0() -> f64 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const -1.210000e+01 : f64
+// CHECK: %[[VAL_1:.*]] = wasmssa.ceil %[[VAL_0]] : f64
+// CHECK: wasmssa.return %[[VAL_1]] : f64
+
+// CHECK-LABEL: wasmssa.func @func_1() -> f32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 1.618000e+00 : f32
+// CHECK: %[[VAL_1:.*]] = wasmssa.ceil %[[VAL_0]] : f32
+// CHECK: wasmssa.return %[[VAL_1]] : f32
+
+// CHECK-LABEL: wasmssa.func @func_2() -> f64 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const -1.210000e+01 : f64
+// CHECK: %[[VAL_1:.*]] = wasmssa.floor %[[VAL_0]] : f64
+// CHECK: wasmssa.return %[[VAL_1]] : f64
+
+// CHECK-LABEL: wasmssa.func @func_3() -> f32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 1.618000e+00 : f32
+// CHECK: %[[VAL_1:.*]] = wasmssa.floor %[[VAL_0]] : f32
+// CHECK: wasmssa.return %[[VAL_1]] : f32
+
+// CHECK-LABEL: wasmssa.func @func_4() -> f64 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const -1.210000e+01 : f64
+// CHECK: %[[VAL_1:.*]] = wasmssa.trunc %[[VAL_0]] : f64
+// CHECK: wasmssa.return %[[VAL_1]] : f64
+
+// CHECK-LABEL: wasmssa.func @func_5() -> f32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.const 1.618000e+00 : f32
+// CHECK: %[[VAL_1:.*]] = wasmssa.trunc %[[VAL_0]] : f32
+// CHECK: wasmssa.return %[[VAL_1]] : f32
diff --git a/mlir/test/Target/Wasm/shl.mlir b/mlir/test/Target/Wasm/shl.mlir
index f2bdd57..1363112 100644
--- a/mlir/test/Target/Wasm/shl.mlir
+++ b/mlir/test/Target/Wasm/shl.mlir
@@ -14,13 +14,13 @@
)
*/
-// CHECK-LABEL: wasmssa.func @shl_i32() -> i32 {
+// CHECK-LABEL: wasmssa.func exported @shl_i32() -> i32 {
// CHECK: %0 = wasmssa.const 10 : i32
// CHECK: %1 = wasmssa.const 3 : i32
// CHECK: %2 = wasmssa.shl %0 by %1 bits : i32
// CHECK: wasmssa.return %2 : i32
-// CHECK-LABEL: wasmssa.func @shl_i64() -> i64 {
+// CHECK-LABEL: wasmssa.func exported @shl_i64() -> i64 {
// CHECK: %0 = wasmssa.const 10 : i64
// CHECK: %1 = wasmssa.const 3 : i64
// CHECK: %2 = wasmssa.shl %0 by %1 bits : i64
diff --git a/mlir/test/Target/Wasm/shr_s.mlir b/mlir/test/Target/Wasm/shr_s.mlir
index 247d9be..da1a38f 100644
--- a/mlir/test/Target/Wasm/shr_s.mlir
+++ b/mlir/test/Target/Wasm/shr_s.mlir
@@ -14,13 +14,13 @@
)
*/
-// CHECK-LABEL: wasmssa.func @shr_s_i32() -> i32 {
+// CHECK-LABEL: wasmssa.func exported @shr_s_i32() -> i32 {
// CHECK: %0 = wasmssa.const 10 : i32
// CHECK: %1 = wasmssa.const 3 : i32
// CHECK: %2 = wasmssa.shr_s %0 by %1 bits : i32
// CHECK: wasmssa.return %2 : i32
-// CHECK-LABEL: wasmssa.func @shr_s_i64() -> i64 {
+// CHECK-LABEL: wasmssa.func exported @shr_s_i64() -> i64 {
// CHECK: %0 = wasmssa.const 10 : i64
// CHECK: %1 = wasmssa.const 3 : i64
// CHECK: %2 = wasmssa.shr_s %0 by %1 bits : i64
diff --git a/mlir/test/Target/Wasm/shr_u.mlir b/mlir/test/Target/Wasm/shr_u.mlir
index 9a79eed..2991c2a 100644
--- a/mlir/test/Target/Wasm/shr_u.mlir
+++ b/mlir/test/Target/Wasm/shr_u.mlir
@@ -14,13 +14,13 @@
)
*/
-// CHECK-LABEL: wasmssa.func @shr_u_i32() -> i32 {
+// CHECK-LABEL: wasmssa.func exported @shr_u_i32() -> i32 {
// CHECK: %0 = wasmssa.const 10 : i32
// CHECK: %1 = wasmssa.const 3 : i32
// CHECK: %2 = wasmssa.shr_u %0 by %1 bits : i32
// CHECK: wasmssa.return %2 : i32
-// CHECK-LABEL: wasmssa.func @shr_u_i64() -> i64 {
+// CHECK-LABEL: wasmssa.func exported @shr_u_i64() -> i64 {
// CHECK: %0 = wasmssa.const 10 : i64
// CHECK: %1 = wasmssa.const 3 : i64
// CHECK: %2 = wasmssa.shr_u %0 by %1 bits : i64
diff --git a/mlir/test/Target/Wasm/sqrt.mlir b/mlir/test/Target/Wasm/sqrt.mlir
index 77444ad..6b968d6 100644
--- a/mlir/test/Target/Wasm/sqrt.mlir
+++ b/mlir/test/Target/Wasm/sqrt.mlir
@@ -12,12 +12,12 @@
)
*/
-// CHECK-LABEL: wasmssa.func @sqrt_f32() -> f32 {
+// CHECK-LABEL: wasmssa.func exported @sqrt_f32() -> f32 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 1.000000e+01 : f32
// CHECK: %[[VAL_1:.*]] = wasmssa.sqrt %[[VAL_0]] : f32
// CHECK: wasmssa.return %[[VAL_1]] : f32
-// CHECK-LABEL: wasmssa.func @sqrt_f64() -> f64 {
+// CHECK-LABEL: wasmssa.func exported @sqrt_f64() -> f64 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 1.000000e+01 : f64
// CHECK: %[[VAL_1:.*]] = wasmssa.sqrt %[[VAL_0]] : f64
// CHECK: wasmssa.return %[[VAL_1]] : f64
diff --git a/mlir/test/Target/Wasm/sub.mlir b/mlir/test/Target/Wasm/sub.mlir
index b9c6caf..5b242f4 100644
--- a/mlir/test/Target/Wasm/sub.mlir
+++ b/mlir/test/Target/Wasm/sub.mlir
@@ -27,25 +27,25 @@
)
*/
-// CHECK-LABEL: wasmssa.func nested @func_0() -> i32 {
+// CHECK-LABEL: wasmssa.func @func_0() -> i32 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 12 : i32
// CHECK: %[[VAL_1:.*]] = wasmssa.const 50 : i32
// CHECK: %[[VAL_2:.*]] = wasmssa.sub %[[VAL_0]] %[[VAL_1]] : i32
// CHECK: wasmssa.return %[[VAL_2]] : i32
-// CHECK-LABEL: wasmssa.func nested @func_1() -> i64 {
+// CHECK-LABEL: wasmssa.func @func_1() -> i64 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 20 : i64
// CHECK: %[[VAL_1:.*]] = wasmssa.const 5 : i64
// CHECK: %[[VAL_2:.*]] = wasmssa.sub %[[VAL_0]] %[[VAL_1]] : i64
// CHECK: wasmssa.return %[[VAL_2]] : i64
-// CHECK-LABEL: wasmssa.func nested @func_2() -> f32 {
+// CHECK-LABEL: wasmssa.func @func_2() -> f32 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 5.000000e+00 : f32
// CHECK: %[[VAL_1:.*]] = wasmssa.const 1.400000e+01 : f32
// CHECK: %[[VAL_2:.*]] = wasmssa.sub %[[VAL_0]] %[[VAL_1]] : f32
// CHECK: wasmssa.return %[[VAL_2]] : f32
-// CHECK-LABEL: wasmssa.func nested @func_3() -> f64 {
+// CHECK-LABEL: wasmssa.func @func_3() -> f64 {
// CHECK: %[[VAL_0:.*]] = wasmssa.const 1.700000e+01 : f64
// CHECK: %[[VAL_1:.*]] = wasmssa.const 0.000000e+00 : f64
// CHECK: %[[VAL_2:.*]] = wasmssa.sub %[[VAL_0]] %[[VAL_1]] : f64
diff --git a/mlir/test/Target/Wasm/wrap.mlir b/mlir/test/Target/Wasm/wrap.mlir
new file mode 100644
index 0000000..1266758
--- /dev/null
+++ b/mlir/test/Target/Wasm/wrap.mlir
@@ -0,0 +1,15 @@
+// RUN: yaml2obj %S/inputs/wrap.yaml.wasm -o - | mlir-translate --import-wasm | FileCheck %s
+/* Source code used to create this test:
+(module
+ (func (export "i64_wrap") (param $in i64) (result i32)
+ local.get $in
+ i32.wrap_i64
+ )
+)
+*/
+
+// CHECK-LABEL: wasmssa.func exported @i64_wrap(
+// CHECK-SAME: %[[ARG0:.*]]: !wasmssa<local ref to i64>) -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.local_get %[[ARG0]] : ref to i64
+// CHECK: %[[VAL_1:.*]] = wasmssa.wrap %[[VAL_0]] : i64 to i32
+// CHECK: wasmssa.return %[[VAL_1]] : i32
diff --git a/mlir/test/Target/Wasm/xor.mlir b/mlir/test/Target/Wasm/xor.mlir
index 94691de..56407db 100644
--- a/mlir/test/Target/Wasm/xor.mlir
+++ b/mlir/test/Target/Wasm/xor.mlir
@@ -14,13 +14,13 @@
)
*/
-// CHECK-LABEL: wasmssa.func @xor_i32() -> i32 {
+// CHECK-LABEL: wasmssa.func exported @xor_i32() -> i32 {
// CHECK: %0 = wasmssa.const 10 : i32
// CHECK: %1 = wasmssa.const 3 : i32
// CHECK: %2 = wasmssa.xor %0 %1 : i32
// CHECK: wasmssa.return %2 : i32
-// CHECK-LABEL: wasmssa.func @xor_i64() -> i64 {
+// CHECK-LABEL: wasmssa.func exported @xor_i64() -> i64 {
// CHECK: %0 = wasmssa.const 10 : i64
// CHECK: %1 = wasmssa.const 3 : i64
// CHECK: %2 = wasmssa.xor %0 %1 : i64
diff --git a/mlir/test/python/dialects/transform_smt_ext.py b/mlir/test/python/dialects/transform_smt_ext.py
index 3692fd9..e28c56f 100644
--- a/mlir/test/python/dialects/transform_smt_ext.py
+++ b/mlir/test/python/dialects/transform_smt_ext.py
@@ -25,26 +25,44 @@ def run(f):
# CHECK-LABEL: TEST: testConstrainParamsOp
@run
def testConstrainParamsOp(target):
- dummy_value = ir.IntegerAttr.get(ir.IntegerType.get_signless(32), 42)
+ c42_attr = ir.IntegerAttr.get(ir.IntegerType.get_signless(32), 42)
# CHECK: %[[PARAM_AS_PARAM:.*]] = transform.param.constant
- symbolic_value = transform.ParamConstantOp(
- transform.AnyParamType.get(), dummy_value
+ symbolic_value_as_param = transform.ParamConstantOp(
+ transform.AnyParamType.get(), c42_attr
)
# CHECK: transform.smt.constrain_params(%[[PARAM_AS_PARAM]])
constrain_params = transform_smt.ConstrainParamsOp(
- [symbolic_value], [smt.IntType.get()]
+ [], [symbolic_value_as_param], [smt.IntType.get()]
)
# CHECK-NEXT: ^bb{{.*}}(%[[PARAM_AS_SMT_SYMB:.*]]: !smt.int):
with ir.InsertionPoint(constrain_params.body):
+ symbolic_value_as_smt_var = constrain_params.body.arguments[0]
# CHECK: %[[C0:.*]] = smt.int.constant 0
c0 = smt.IntConstantOp(ir.IntegerAttr.get(ir.IntegerType.get_signless(32), 0))
# CHECK: %[[C43:.*]] = smt.int.constant 43
c43 = smt.IntConstantOp(ir.IntegerAttr.get(ir.IntegerType.get_signless(32), 43))
# CHECK: %[[LB:.*]] = smt.int.cmp le %[[C0]], %[[PARAM_AS_SMT_SYMB]]
- lb = smt.IntCmpOp(smt.IntPredicate.le, c0, constrain_params.body.arguments[0])
+ lb = smt.IntCmpOp(smt.IntPredicate.le, c0, symbolic_value_as_smt_var)
# CHECK: %[[UB:.*]] = smt.int.cmp le %[[PARAM_AS_SMT_SYMB]], %[[C43]]
- ub = smt.IntCmpOp(smt.IntPredicate.le, constrain_params.body.arguments[0], c43)
+ ub = smt.IntCmpOp(smt.IntPredicate.le, symbolic_value_as_smt_var, c43)
# CHECK: %[[BOUNDED:.*]] = smt.and %[[LB]], %[[UB]]
bounded = smt.AndOp([lb, ub])
# CHECK: smt.assert %[[BOUNDED:.*]]
smt.AssertOp(bounded)
+ smt.YieldOp([])
+
+ # CHECK: transform.smt.constrain_params(%[[PARAM_AS_PARAM]])
+ compute_with_params = transform_smt.ConstrainParamsOp(
+ [transform.ParamType.get(ir.IntegerType.get_signless(32))],
+ [symbolic_value_as_param],
+ [smt.IntType.get()],
+ )
+ # CHECK-NEXT: ^bb{{.*}}(%[[SMT_SYMB:.*]]: !smt.int):
+ with ir.InsertionPoint(compute_with_params.body):
+ symbolic_value_as_smt_var = compute_with_params.body.arguments[0]
+ # CHECK: %[[TWICE:.*]] = smt.int.add %[[SMT_SYMB]], %[[SMT_SYMB]]
+ twice_symb = smt.IntAddOp(
+ [symbolic_value_as_smt_var, symbolic_value_as_smt_var]
+ )
+ # CHECK: smt.yield %[[TWICE]]
+ smt.YieldOp([twice_symb])
diff --git a/mlir/unittests/Dialect/SparseTensor/MergerTest.cpp b/mlir/unittests/Dialect/SparseTensor/MergerTest.cpp
index 2a58305..2b92e47 100644
--- a/mlir/unittests/Dialect/SparseTensor/MergerTest.cpp
+++ b/mlir/unittests/Dialect/SparseTensor/MergerTest.cpp
@@ -141,7 +141,7 @@ protected:
}
#define IMPL_BINOP_EXPR(OP, KIND) \
- LLVM_ATTRIBUTE_UNUSED ExprId OP##Expr(ExprId e0, ExprId e1) { \
+ [[maybe_unused]] ExprId OP##Expr(ExprId e0, ExprId e1) { \
return merger.addExp(KIND, e0, e1); \
}
FOREVERY_BINOP(IMPL_BINOP_EXPR)
diff --git a/offload/include/OpenMP/InteropAPI.h b/offload/include/OpenMP/InteropAPI.h
index 53ac4be..8c06ba3 100644
--- a/offload/include/OpenMP/InteropAPI.h
+++ b/offload/include/OpenMP/InteropAPI.h
@@ -25,8 +25,8 @@ typedef enum kmp_interop_type_t {
} kmp_interop_type_t;
struct interop_attrs_t {
- bool inorder : 1;
- int reserved : 31;
+ uint32_t inorder : 1;
+ uint32_t reserved : 31;
/// Check if the supported attributes are compatible with the current
/// attributes. Only if an attribute is supported can the value be true,
@@ -44,15 +44,15 @@ struct interop_spec_t {
};
struct interop_flags_t {
- bool implicit : 1; // dispatch (true) or interop (false)
- bool nowait : 1; // has nowait flag
- int reserved : 30;
+ uint32_t implicit : 1; // dispatch (true) or interop (false)
+ uint32_t nowait : 1; // has nowait flag
+ uint32_t reserved : 30;
};
struct interop_ctx_t {
uint32_t version; // version of the interface (current is 0)
interop_flags_t flags;
- int gtid;
+ int32_t gtid;
};
struct dep_pack_t {
diff --git a/offload/libomptarget/OpenMP/InteropAPI.cpp b/offload/libomptarget/OpenMP/InteropAPI.cpp
index c55ef2c..d6ef17c 100644
--- a/offload/libomptarget/OpenMP/InteropAPI.cpp
+++ b/offload/libomptarget/OpenMP/InteropAPI.cpp
@@ -22,8 +22,7 @@ extern "C" {
void __kmpc_omp_wait_deps(ident_t *loc_ref, int32_t gtid, int32_t ndeps,
kmp_depend_info_t *dep_list, int32_t ndeps_noalias,
- kmp_depend_info_t *noalias_dep_list)
- __attribute__((weak));
+ kmp_depend_info_t *noalias_dep_list);
} // extern "C"
diff --git a/polly/lib/Transform/Canonicalization.cpp b/polly/lib/Transform/Canonicalization.cpp
index 748d710..1be560e 100644
--- a/polly/lib/Transform/Canonicalization.cpp
+++ b/polly/lib/Transform/Canonicalization.cpp
@@ -104,8 +104,7 @@ polly::buildCanonicalicationPassesForNPM(llvm::ModulePassManager &MPM,
LoopPassManager LPM;
LPM.addPass(LoopRotatePass(Level != OptimizationLevel::Oz));
FPM.addPass(createFunctionToLoopPassAdaptor<LoopPassManager>(
- std::move(LPM), /*UseMemorySSA=*/false,
- /*UseBlockFrequencyInfo=*/false));
+ std::move(LPM), /*UseMemorySSA=*/false));
}
if (PollyInliner) {
MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
@@ -121,8 +120,7 @@ polly::buildCanonicalicationPassesForNPM(llvm::ModulePassManager &MPM,
LoopPassManager LPM;
LPM.addPass(IndVarSimplifyPass());
FPM.addPass(createFunctionToLoopPassAdaptor<LoopPassManager>(
- std::move(LPM), /*UseMemorySSA=*/false,
- /*UseBlockFrequencyInfo=*/true));
+ std::move(LPM), /*UseMemorySSA=*/false));
}
return FPM;
diff --git a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
index c7e3aa6..5d87e32 100644
--- a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
@@ -2948,6 +2948,22 @@ libc_support_library(
)
libc_support_library(
+ name = "__support_math_exp2m1f",
+ hdrs = ["src/__support/math/exp2m1f.h"],
+ deps = [
+ ":__support_fputil_except_value_utils",
+ ":__support_fputil_fma",
+ ":__support_fputil_multiply_add",
+ ":__support_fputil_nearest_integer",
+ ":__support_fputil_polyeval",
+ ":__support_fputil_rounding_mode",
+ ":__support_macros_optimization",
+ ":__support_math_common_constants",
+ ":__support_math_exp10f_utils",
+ ],
+)
+
+libc_support_library(
name = "__support_math_exp10",
hdrs = ["src/__support/math/exp10.h"],
deps = [
@@ -3734,8 +3750,7 @@ libc_math_function(
libc_math_function(
name = "exp2m1f",
additional_deps = [
- ":__support_fputil_polyeval",
- ":__support_math_exp10f_utils",
+ ":__support_math_exp2m1f",
],
)
diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
index ac58e39..a4724b9 100644
--- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
@@ -5512,12 +5512,13 @@ gentbl_cc_library(
cc_library(
name = "GPUPipelines",
- srcs = ["lib/Dialect/GPU/Pipelines/GPUToNVVMPipeline.cpp"],
+ srcs = glob(["lib/Dialect/GPU/Pipelines/*.cpp"]),
hdrs = ["include/mlir/Dialect/GPU/Pipelines/Passes.h"],
includes = ["include"],
deps = [
":AffineToStandard",
":ArithToLLVM",
+ ":ConversionPasses",
":FuncDialect",
":FuncToLLVM",
":GPUDialect",
@@ -5526,8 +5527,10 @@ cc_library(
":GPUTransforms",
":IndexToLLVM",
":LLVMDialect",
+ ":LLVMIRTransforms",
":LinalgTransforms",
":MathToLLVM",
+ ":MathToXeVM",
":MemRefToLLVM",
":MemRefTransforms",
":NVGPUToNVVM",
@@ -5538,6 +5541,10 @@ cc_library(
":Transforms",
":VectorToLLVM",
":VectorToSCF",
+ ":XeGPUToXeVM",
+ ":XeGPUTransforms",
+ ":XeVMTarget",
+ ":XeVMToLLVM",
],
)
@@ -7066,6 +7073,7 @@ cc_library(
]),
includes = ["include"],
deps = [
+ ":AMDGPUUtils",
":ConversionPassIncGen",
":DialectUtils",
":FuncDialect",
@@ -7079,6 +7087,7 @@ cc_library(
":ROCDLDialect",
":TransformUtils",
":VectorDialect",
+ "//llvm:Support",
],
)